1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 static int enable_le;
49
50 /* Handle HCI Event packets */
51
52 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53 {
54 __u8 status = *((__u8 *) skb->data);
55
56 BT_DBG("%s status 0x%x", hdev->name, status);
57
58 if (status)
59 return;
60
61 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
62 test_bit(HCI_MGMT, &hdev->flags))
63 mgmt_discovering(hdev->id, 0);
64
65 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
66
67 hci_conn_check_pending(hdev);
68 }
69
70 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 {
72 __u8 status = *((__u8 *) skb->data);
73
74 BT_DBG("%s status 0x%x", hdev->name, status);
75
76 if (status)
77 return;
78
79 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
80 test_bit(HCI_MGMT, &hdev->flags))
81 mgmt_discovering(hdev->id, 0);
82
83 hci_conn_check_pending(hdev);
84 }
85
86 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
87 {
88 BT_DBG("%s", hdev->name);
89 }
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn) {
105 if (rp->role)
106 conn->link_mode &= ~HCI_LM_MASTER;
107 else
108 conn->link_mode |= HCI_LM_MASTER;
109 }
110
111 hci_dev_unlock(hdev);
112 }
113
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 {
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
118
119 BT_DBG("%s status 0x%x", hdev->name, rp->status);
120
121 if (rp->status)
122 return;
123
124 hci_dev_lock(hdev);
125
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 if (conn)
128 conn->link_policy = __le16_to_cpu(rp->policy);
129
130 hci_dev_unlock(hdev);
131 }
132
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 {
135 struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 struct hci_conn *conn;
137 void *sent;
138
139 BT_DBG("%s status 0x%x", hdev->name, rp->status);
140
141 if (rp->status)
142 return;
143
144 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 if (!sent)
146 return;
147
148 hci_dev_lock(hdev);
149
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 if (conn)
152 conn->link_policy = get_unaligned_le16(sent + 2);
153
154 hci_dev_unlock(hdev);
155 }
156
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
158 {
159 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
160
161 BT_DBG("%s status 0x%x", hdev->name, rp->status);
162
163 if (rp->status)
164 return;
165
166 hdev->link_policy = __le16_to_cpu(rp->policy);
167 }
168
169 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
170 {
171 __u8 status = *((__u8 *) skb->data);
172 void *sent;
173
174 BT_DBG("%s status 0x%x", hdev->name, status);
175
176 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
177 if (!sent)
178 return;
179
180 if (!status)
181 hdev->link_policy = get_unaligned_le16(sent);
182
183 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
184 }
185
186 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
187 {
188 __u8 status = *((__u8 *) skb->data);
189
190 BT_DBG("%s status 0x%x", hdev->name, status);
191
192 clear_bit(HCI_RESET, &hdev->flags);
193
194 hci_req_complete(hdev, HCI_OP_RESET, status);
195 }
196
197 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
198 {
199 __u8 status = *((__u8 *) skb->data);
200 void *sent;
201
202 BT_DBG("%s status 0x%x", hdev->name, status);
203
204 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
205 if (!sent)
206 return;
207
208 if (test_bit(HCI_MGMT, &hdev->flags))
209 mgmt_set_local_name_complete(hdev->id, sent, status);
210
211 if (status)
212 return;
213
214 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
215 }
216
217 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
218 {
219 struct hci_rp_read_local_name *rp = (void *) skb->data;
220
221 BT_DBG("%s status 0x%x", hdev->name, rp->status);
222
223 if (rp->status)
224 return;
225
226 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
227 }
228
229 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
230 {
231 __u8 status = *((__u8 *) skb->data);
232 void *sent;
233
234 BT_DBG("%s status 0x%x", hdev->name, status);
235
236 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
237 if (!sent)
238 return;
239
240 if (!status) {
241 __u8 param = *((__u8 *) sent);
242
243 if (param == AUTH_ENABLED)
244 set_bit(HCI_AUTH, &hdev->flags);
245 else
246 clear_bit(HCI_AUTH, &hdev->flags);
247 }
248
249 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
250 }
251
252 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
253 {
254 __u8 status = *((__u8 *) skb->data);
255 void *sent;
256
257 BT_DBG("%s status 0x%x", hdev->name, status);
258
259 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
260 if (!sent)
261 return;
262
263 if (!status) {
264 __u8 param = *((__u8 *) sent);
265
266 if (param)
267 set_bit(HCI_ENCRYPT, &hdev->flags);
268 else
269 clear_bit(HCI_ENCRYPT, &hdev->flags);
270 }
271
272 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
273 }
274
275 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
276 {
277 __u8 status = *((__u8 *) skb->data);
278 void *sent;
279
280 BT_DBG("%s status 0x%x", hdev->name, status);
281
282 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
283 if (!sent)
284 return;
285
286 if (!status) {
287 __u8 param = *((__u8 *) sent);
288 int old_pscan, old_iscan;
289
290 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
291 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
292
293 if (param & SCAN_INQUIRY) {
294 set_bit(HCI_ISCAN, &hdev->flags);
295 if (!old_iscan)
296 mgmt_discoverable(hdev->id, 1);
297 } else if (old_iscan)
298 mgmt_discoverable(hdev->id, 0);
299
300 if (param & SCAN_PAGE) {
301 set_bit(HCI_PSCAN, &hdev->flags);
302 if (!old_pscan)
303 mgmt_connectable(hdev->id, 1);
304 } else if (old_pscan)
305 mgmt_connectable(hdev->id, 0);
306 }
307
308 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
309 }
310
311 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
312 {
313 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
314
315 BT_DBG("%s status 0x%x", hdev->name, rp->status);
316
317 if (rp->status)
318 return;
319
320 memcpy(hdev->dev_class, rp->dev_class, 3);
321
322 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
323 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
324 }
325
326 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
327 {
328 __u8 status = *((__u8 *) skb->data);
329 void *sent;
330
331 BT_DBG("%s status 0x%x", hdev->name, status);
332
333 if (status)
334 return;
335
336 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
337 if (!sent)
338 return;
339
340 memcpy(hdev->dev_class, sent, 3);
341 }
342
343 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
344 {
345 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
346 __u16 setting;
347
348 BT_DBG("%s status 0x%x", hdev->name, rp->status);
349
350 if (rp->status)
351 return;
352
353 setting = __le16_to_cpu(rp->voice_setting);
354
355 if (hdev->voice_setting == setting)
356 return;
357
358 hdev->voice_setting = setting;
359
360 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
361
362 if (hdev->notify) {
363 tasklet_disable(&hdev->tx_task);
364 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
365 tasklet_enable(&hdev->tx_task);
366 }
367 }
368
369 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 {
371 __u8 status = *((__u8 *) skb->data);
372 __u16 setting;
373 void *sent;
374
375 BT_DBG("%s status 0x%x", hdev->name, status);
376
377 if (status)
378 return;
379
380 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
381 if (!sent)
382 return;
383
384 setting = get_unaligned_le16(sent);
385
386 if (hdev->voice_setting == setting)
387 return;
388
389 hdev->voice_setting = setting;
390
391 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
392
393 if (hdev->notify) {
394 tasklet_disable(&hdev->tx_task);
395 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
396 tasklet_enable(&hdev->tx_task);
397 }
398 }
399
400 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
401 {
402 __u8 status = *((__u8 *) skb->data);
403
404 BT_DBG("%s status 0x%x", hdev->name, status);
405
406 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
407 }
408
409 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
410 {
411 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
412
413 BT_DBG("%s status 0x%x", hdev->name, rp->status);
414
415 if (rp->status)
416 return;
417
418 hdev->ssp_mode = rp->mode;
419 }
420
421 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
422 {
423 __u8 status = *((__u8 *) skb->data);
424 void *sent;
425
426 BT_DBG("%s status 0x%x", hdev->name, status);
427
428 if (status)
429 return;
430
431 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
432 if (!sent)
433 return;
434
435 hdev->ssp_mode = *((__u8 *) sent);
436 }
437
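/* Pick the richest inquiry result format the controller can deliver:
 * 2 = inquiry result with extended inquiry response, 1 = inquiry result
 * with RSSI, 0 = standard inquiry result.  The manufacturer/revision
 * checks below appear to be quirks for controllers that can deliver
 * RSSI results without advertising LMP_RSSI_INQ. */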
438 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
439 {
440 if (hdev->features[6] & LMP_EXT_INQ)
441 return 2;
442
443 if (hdev->features[3] & LMP_RSSI_INQ)
444 return 1;
445
446 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
447 hdev->lmp_subver == 0x0757)
448 return 1;
449
450 if (hdev->manufacturer == 15) {
451 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
452 return 1;
453 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
454 return 1;
455 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
456 return 1;
457 }
458
459 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
460 hdev->lmp_subver == 0x1805)
461 return 1;
462
463 return 0;
464 }
465
466 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
467 {
468 u8 mode;
469
470 mode = hci_get_inquiry_mode(hdev);
471
472 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
473 }
474
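/* Build the parameter for HCI Set Event Mask: start from the BR/EDR
 * default mask and enable additional events only when the corresponding
 * LMP feature bits are supported by the controller. */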
475 static void hci_setup_event_mask(struct hci_dev *hdev)
476 {
477 /* The second byte is 0xff instead of 0x9f (two reserved bits
478 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
479 * command otherwise */
480 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
481
482 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
483 * any event mask for pre 1.2 devices */
484 if (hdev->lmp_ver <= 1)
485 return;
486
487 events[4] |= 0x01; /* Flow Specification Complete */
488 events[4] |= 0x02; /* Inquiry Result with RSSI */
489 events[4] |= 0x04; /* Read Remote Extended Features Complete */
490 events[5] |= 0x08; /* Synchronous Connection Complete */
491 events[5] |= 0x10; /* Synchronous Connection Changed */
492
493 if (hdev->features[3] & LMP_RSSI_INQ)
494 events[4] |= 0x04; /* Inquiry Result with RSSI */
495
496 if (hdev->features[5] & LMP_SNIFF_SUBR)
497 events[5] |= 0x20; /* Sniff Subrating */
498
499 if (hdev->features[5] & LMP_PAUSE_ENC)
500 events[5] |= 0x80; /* Encryption Key Refresh Complete */
501
502 if (hdev->features[6] & LMP_EXT_INQ)
503 events[5] |= 0x40; /* Extended Inquiry Result */
504
505 if (hdev->features[6] & LMP_NO_FLUSH)
506 events[7] |= 0x01; /* Enhanced Flush Complete */
507
508 if (hdev->features[7] & LMP_LSTO)
509 events[6] |= 0x80; /* Link Supervision Timeout Changed */
510
511 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
512 events[6] |= 0x01; /* IO Capability Request */
513 events[6] |= 0x02; /* IO Capability Response */
514 events[6] |= 0x04; /* User Confirmation Request */
515 events[6] |= 0x08; /* User Passkey Request */
516 events[6] |= 0x10; /* Remote OOB Data Request */
517 events[6] |= 0x20; /* Simple Pairing Complete */
518 events[7] |= 0x04; /* User Passkey Notification */
519 events[7] |= 0x08; /* Keypress Notification */
520 events[7] |= 0x10; /* Remote Host Supported
521 * Features Notification */
522 }
523
524 if (hdev->features[4] & LMP_LE)
525 events[7] |= 0x20; /* LE Meta-Event */
526
527 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
528 }
529
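/* Write the LE Host Supported setting: the le flag is only set when
 * enable_le is non-zero, and simultaneous LE and BR/EDR support is
 * flagged when the controller reports LMP_SIMUL_LE_BR. */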
530 static void hci_set_le_support(struct hci_dev *hdev)
531 {
532 struct hci_cp_write_le_host_supported cp;
533
534 memset(&cp, 0, sizeof(cp));
535
536 if (enable_le) {
537 cp.le = 1;
538 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
539 }
540
541 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
542 }
543
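/* Issue the remaining controller initialization commands once the local
 * version information is known (called from hci_cc_read_local_version
 * while HCI_INIT is set); most commands are gated on the corresponding
 * LMP feature bits. */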
544 static void hci_setup(struct hci_dev *hdev)
545 {
546 hci_setup_event_mask(hdev);
547
548 if (hdev->lmp_ver > 1)
549 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
550
551 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
552 u8 mode = 0x01;
553 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
554 }
555
556 if (hdev->features[3] & LMP_RSSI_INQ)
557 hci_setup_inquiry_mode(hdev);
558
559 if (hdev->features[7] & LMP_INQ_TX_PWR)
560 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
561
562 if (hdev->features[7] & LMP_EXTFEATURES) {
563 struct hci_cp_read_local_ext_features cp;
564
565 cp.page = 0x01;
566 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
567 sizeof(cp), &cp);
568 }
569
570 if (hdev->features[4] & LMP_LE)
571 hci_set_le_support(hdev);
572 }
573
574 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
575 {
576 struct hci_rp_read_local_version *rp = (void *) skb->data;
577
578 BT_DBG("%s status 0x%x", hdev->name, rp->status);
579
580 if (rp->status)
581 return;
582
583 hdev->hci_ver = rp->hci_ver;
584 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
585 hdev->lmp_ver = rp->lmp_ver;
586 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
587 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
588
589 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
590 hdev->manufacturer,
591 hdev->hci_ver, hdev->hci_rev);
592
593 if (test_bit(HCI_INIT, &hdev->flags))
594 hci_setup(hdev);
595 }
596
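/* Derive the default link policy from the supported features (role
 * switch, hold, sniff, park) and write it to the controller. */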
597 static void hci_setup_link_policy(struct hci_dev *hdev)
598 {
599 u16 link_policy = 0;
600
601 if (hdev->features[0] & LMP_RSWITCH)
602 link_policy |= HCI_LP_RSWITCH;
603 if (hdev->features[0] & LMP_HOLD)
604 link_policy |= HCI_LP_HOLD;
605 if (hdev->features[0] & LMP_SNIFF)
606 link_policy |= HCI_LP_SNIFF;
607 if (hdev->features[1] & LMP_PARK)
608 link_policy |= HCI_LP_PARK;
609
610 link_policy = cpu_to_le16(link_policy);
611 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
612 sizeof(link_policy), &link_policy);
613 }
614
615 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
616 {
617 struct hci_rp_read_local_commands *rp = (void *) skb->data;
618
619 BT_DBG("%s status 0x%x", hdev->name, rp->status);
620
621 if (rp->status)
622 goto done;
623
624 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
625
626 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
627 hci_setup_link_policy(hdev);
628
629 done:
630 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
631 }
632
633 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
634 {
635 struct hci_rp_read_local_features *rp = (void *) skb->data;
636
637 BT_DBG("%s status 0x%x", hdev->name, rp->status);
638
639 if (rp->status)
640 return;
641
642 memcpy(hdev->features, rp->features, 8);
643
644 /* Adjust default settings according to features
645 * supported by device. */
646
647 if (hdev->features[0] & LMP_3SLOT)
648 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
649
650 if (hdev->features[0] & LMP_5SLOT)
651 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
652
653 if (hdev->features[1] & LMP_HV2) {
654 hdev->pkt_type |= (HCI_HV2);
655 hdev->esco_type |= (ESCO_HV2);
656 }
657
658 if (hdev->features[1] & LMP_HV3) {
659 hdev->pkt_type |= (HCI_HV3);
660 hdev->esco_type |= (ESCO_HV3);
661 }
662
663 if (hdev->features[3] & LMP_ESCO)
664 hdev->esco_type |= (ESCO_EV3);
665
666 if (hdev->features[4] & LMP_EV4)
667 hdev->esco_type |= (ESCO_EV4);
668
669 if (hdev->features[4] & LMP_EV5)
670 hdev->esco_type |= (ESCO_EV5);
671
672 if (hdev->features[5] & LMP_EDR_ESCO_2M)
673 hdev->esco_type |= (ESCO_2EV3);
674
675 if (hdev->features[5] & LMP_EDR_ESCO_3M)
676 hdev->esco_type |= (ESCO_3EV3);
677
678 if (hdev->features[5] & LMP_EDR_3S_ESCO)
679 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
680
681 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
682 hdev->features[0], hdev->features[1],
683 hdev->features[2], hdev->features[3],
684 hdev->features[4], hdev->features[5],
685 hdev->features[6], hdev->features[7]);
686 }
687
688 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
689 struct sk_buff *skb)
690 {
691 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
692
693 BT_DBG("%s status 0x%x", hdev->name, rp->status);
694
695 if (rp->status)
696 return;
697
698 memcpy(hdev->extfeatures, rp->features, 8);
699
700 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
701 }
702
703 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
704 {
705 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
706
707 BT_DBG("%s status 0x%x", hdev->name, rp->status);
708
709 if (rp->status)
710 return;
711
712 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
713 hdev->sco_mtu = rp->sco_mtu;
714 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
715 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
716
717 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
718 hdev->sco_mtu = 64;
719 hdev->sco_pkts = 8;
720 }
721
722 hdev->acl_cnt = hdev->acl_pkts;
723 hdev->sco_cnt = hdev->sco_pkts;
724
725 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
726 hdev->acl_mtu, hdev->acl_pkts,
727 hdev->sco_mtu, hdev->sco_pkts);
728 }
729
730 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
731 {
732 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
733
734 BT_DBG("%s status 0x%x", hdev->name, rp->status);
735
736 if (!rp->status)
737 bacpy(&hdev->bdaddr, &rp->bdaddr);
738
739 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
740 }
741
742 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
743 {
744 __u8 status = *((__u8 *) skb->data);
745
746 BT_DBG("%s status 0x%x", hdev->name, status);
747
748 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
749 }
750
751 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
752 struct sk_buff *skb)
753 {
754 __u8 status = *((__u8 *) skb->data);
755
756 BT_DBG("%s status 0x%x", hdev->name, status);
757
758 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
759 }
760
761 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
762 {
763 __u8 status = *((__u8 *) skb->data);
764
765 BT_DBG("%s status 0x%x", hdev->name, status);
766
767 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
768 }
769
770 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
771 struct sk_buff *skb)
772 {
773 __u8 status = *((__u8 *) skb->data);
774
775 BT_DBG("%s status 0x%x", hdev->name, status);
776
777 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
778 }
779
780 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
781 struct sk_buff *skb)
782 {
783 __u8 status = *((__u8 *) skb->data);
784
785 BT_DBG("%s status 0x%x", hdev->name, status);
786
787 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
788 }
789
790 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
791 {
792 __u8 status = *((__u8 *) skb->data);
793
794 BT_DBG("%s status 0x%x", hdev->name, status);
795
796 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
797 }
798
799 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
800 {
801 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
802 struct hci_cp_pin_code_reply *cp;
803 struct hci_conn *conn;
804
805 BT_DBG("%s status 0x%x", hdev->name, rp->status);
806
807 if (test_bit(HCI_MGMT, &hdev->flags))
808 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
809
810 if (rp->status != 0)
811 return;
812
813 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
814 if (!cp)
815 return;
816
817 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
818 if (conn)
819 conn->pin_length = cp->pin_len;
820 }
821
822 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
823 {
824 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
825
826 BT_DBG("%s status 0x%x", hdev->name, rp->status);
827
828 if (test_bit(HCI_MGMT, &hdev->flags))
829 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
830 rp->status);
831 }

832 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
833 struct sk_buff *skb)
834 {
835 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
836
837 BT_DBG("%s status 0x%x", hdev->name, rp->status);
838
839 if (rp->status)
840 return;
841
842 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
843 hdev->le_pkts = rp->le_max_pkt;
844
845 hdev->le_cnt = hdev->le_pkts;
846
847 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
848
849 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
850 }
851
852 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
853 {
854 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
855
856 BT_DBG("%s status 0x%x", hdev->name, rp->status);
857
858 if (test_bit(HCI_MGMT, &hdev->flags))
859 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
860 rp->status);
861 }
862
863 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
864 struct sk_buff *skb)
865 {
866 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
867
868 BT_DBG("%s status 0x%x", hdev->name, rp->status);
869
870 if (test_bit(HCI_MGMT, &hdev->flags))
871 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
872 rp->status);
873 }
874
875 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
876 struct sk_buff *skb)
877 {
878 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
879
880 BT_DBG("%s status 0x%x", hdev->name, rp->status);
881
882 mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
883 rp->randomizer, rp->status);
884 }
885
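/* When LE scanning has just been enabled, drop the cached advertising
 * entries and stop the clear timer; when it is disabled, re-arm the
 * timer so stale entries are flushed after ADV_CLEAR_TIMEOUT. */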
886 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
887 struct sk_buff *skb)
888 {
889 struct hci_cp_le_set_scan_enable *cp;
890 __u8 status = *((__u8 *) skb->data);
891
892 BT_DBG("%s status 0x%x", hdev->name, status);
893
894 if (status)
895 return;
896
897 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
898 if (!cp)
899 return;
900
901 if (cp->enable == 0x01) {
902 del_timer(&hdev->adv_timer);
903
904 hci_dev_lock(hdev);
905 hci_adv_entries_clear(hdev);
906 hci_dev_unlock(hdev);
907 } else if (cp->enable == 0x00) {
908 mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
909 }
910 }
911
912 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
913 {
914 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
915
916 BT_DBG("%s status 0x%x", hdev->name, rp->status);
917
918 if (rp->status)
919 return;
920
921 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
922 }
923
924 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
925 {
926 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
927
928 BT_DBG("%s status 0x%x", hdev->name, rp->status);
929
930 if (rp->status)
931 return;
932
933 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
934 }
935
936 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
937 struct sk_buff *skb)
938 {
939 struct hci_cp_read_local_ext_features cp;
940 __u8 status = *((__u8 *) skb->data);
941
942 BT_DBG("%s status 0x%x", hdev->name, status);
943
944 if (status)
945 return;
946
947 cp.page = 0x01;
948 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
949 }
950
951 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
952 {
953 BT_DBG("%s status 0x%x", hdev->name, status);
954
955 if (status) {
956 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
957 hci_conn_check_pending(hdev);
958 return;
959 }
960
961 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
962 test_bit(HCI_MGMT, &hdev->flags))
963 mgmt_discovering(hdev->id, 1);
964 }
965
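/* Command Status for HCI Create Connection.  On failure the connection
 * is torn down unless the controller returned 0x0c (Command Disallowed)
 * and fewer than three attempts have been made, in which case the
 * connection is parked in BT_CONNECT2 for a retry.  On success an
 * outgoing ACL connection object is created if none exists yet. */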
966 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
967 {
968 struct hci_cp_create_conn *cp;
969 struct hci_conn *conn;
970
971 BT_DBG("%s status 0x%x", hdev->name, status);
972
973 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
974 if (!cp)
975 return;
976
977 hci_dev_lock(hdev);
978
979 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
980
981 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
982
983 if (status) {
984 if (conn && conn->state == BT_CONNECT) {
985 if (status != 0x0c || conn->attempt > 2) {
986 conn->state = BT_CLOSED;
987 hci_proto_connect_cfm(conn, status);
988 hci_conn_del(conn);
989 } else
990 conn->state = BT_CONNECT2;
991 }
992 } else {
993 if (!conn) {
994 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
995 if (conn) {
996 conn->out = 1;
997 conn->link_mode |= HCI_LM_MASTER;
998 } else
999 BT_ERR("No memory for new connection");
1000 }
1001 }
1002
1003 hci_dev_unlock(hdev);
1004 }
1005
1006 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1007 {
1008 struct hci_cp_add_sco *cp;
1009 struct hci_conn *acl, *sco;
1010 __u16 handle;
1011
1012 BT_DBG("%s status 0x%x", hdev->name, status);
1013
1014 if (!status)
1015 return;
1016
1017 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1018 if (!cp)
1019 return;
1020
1021 handle = __le16_to_cpu(cp->handle);
1022
1023 BT_DBG("%s handle %d", hdev->name, handle);
1024
1025 hci_dev_lock(hdev);
1026
1027 acl = hci_conn_hash_lookup_handle(hdev, handle);
1028 if (acl) {
1029 sco = acl->link;
1030 if (sco) {
1031 sco->state = BT_CLOSED;
1032
1033 hci_proto_connect_cfm(sco, status);
1034 hci_conn_del(sco);
1035 }
1036 }
1037
1038 hci_dev_unlock(hdev);
1039 }
1040
1041 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1042 {
1043 struct hci_cp_auth_requested *cp;
1044 struct hci_conn *conn;
1045
1046 BT_DBG("%s status 0x%x", hdev->name, status);
1047
1048 if (!status)
1049 return;
1050
1051 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1052 if (!cp)
1053 return;
1054
1055 hci_dev_lock(hdev);
1056
1057 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1058 if (conn) {
1059 if (conn->state == BT_CONFIG) {
1060 hci_proto_connect_cfm(conn, status);
1061 hci_conn_put(conn);
1062 }
1063 }
1064
1065 hci_dev_unlock(hdev);
1066 }
1067
1068 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1069 {
1070 struct hci_cp_set_conn_encrypt *cp;
1071 struct hci_conn *conn;
1072
1073 BT_DBG("%s status 0x%x", hdev->name, status);
1074
1075 if (!status)
1076 return;
1077
1078 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1079 if (!cp)
1080 return;
1081
1082 hci_dev_lock(hdev);
1083
1084 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1085 if (conn) {
1086 if (conn->state == BT_CONFIG) {
1087 hci_proto_connect_cfm(conn, status);
1088 hci_conn_put(conn);
1089 }
1090 }
1091
1092 hci_dev_unlock(hdev);
1093 }
1094
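/* Decide whether an outgoing connection in BT_CONFIG still needs
 * authentication: skip it for SDP-only security, and for non-SSP links
 * only authenticate when high security or MITM protection was
 * requested. */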
1095 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1096 struct hci_conn *conn)
1097 {
1098 if (conn->state != BT_CONFIG || !conn->out)
1099 return 0;
1100
1101 if (conn->pending_sec_level == BT_SECURITY_SDP)
1102 return 0;
1103
1104 /* Only request authentication for SSP connections or non-SSP
1105 * devices with sec_level HIGH or if MITM protection is requested */
1106 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1107 conn->pending_sec_level != BT_SECURITY_HIGH &&
1108 !(conn->auth_type & 0x01))
1109 return 0;
1110
1111 return 1;
1112 }
1113
1114 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1115 {
1116 struct hci_cp_remote_name_req *cp;
1117 struct hci_conn *conn;
1118
1119 BT_DBG("%s status 0x%x", hdev->name, status);
1120
1121 /* If successful, wait for the name req complete event before
1122 * checking for the need to do authentication */
1123 if (!status)
1124 return;
1125
1126 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1127 if (!cp)
1128 return;
1129
1130 hci_dev_lock(hdev);
1131
1132 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1133 if (!conn)
1134 goto unlock;
1135
1136 if (!hci_outgoing_auth_needed(hdev, conn))
1137 goto unlock;
1138
1139 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1140 struct hci_cp_auth_requested cp;
1141 cp.handle = __cpu_to_le16(conn->handle);
1142 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1143 }
1144
1145 unlock:
1146 hci_dev_unlock(hdev);
1147 }
1148
1149 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1150 {
1151 struct hci_cp_read_remote_features *cp;
1152 struct hci_conn *conn;
1153
1154 BT_DBG("%s status 0x%x", hdev->name, status);
1155
1156 if (!status)
1157 return;
1158
1159 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1160 if (!cp)
1161 return;
1162
1163 hci_dev_lock(hdev);
1164
1165 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1166 if (conn) {
1167 if (conn->state == BT_CONFIG) {
1168 hci_proto_connect_cfm(conn, status);
1169 hci_conn_put(conn);
1170 }
1171 }
1172
1173 hci_dev_unlock(hdev);
1174 }
1175
1176 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1177 {
1178 struct hci_cp_read_remote_ext_features *cp;
1179 struct hci_conn *conn;
1180
1181 BT_DBG("%s status 0x%x", hdev->name, status);
1182
1183 if (!status)
1184 return;
1185
1186 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1187 if (!cp)
1188 return;
1189
1190 hci_dev_lock(hdev);
1191
1192 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1193 if (conn) {
1194 if (conn->state == BT_CONFIG) {
1195 hci_proto_connect_cfm(conn, status);
1196 hci_conn_put(conn);
1197 }
1198 }
1199
1200 hci_dev_unlock(hdev);
1201 }
1202
1203 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1204 {
1205 struct hci_cp_setup_sync_conn *cp;
1206 struct hci_conn *acl, *sco;
1207 __u16 handle;
1208
1209 BT_DBG("%s status 0x%x", hdev->name, status);
1210
1211 if (!status)
1212 return;
1213
1214 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1215 if (!cp)
1216 return;
1217
1218 handle = __le16_to_cpu(cp->handle);
1219
1220 BT_DBG("%s handle %d", hdev->name, handle);
1221
1222 hci_dev_lock(hdev);
1223
1224 acl = hci_conn_hash_lookup_handle(hdev, handle);
1225 if (acl) {
1226 sco = acl->link;
1227 if (sco) {
1228 sco->state = BT_CLOSED;
1229
1230 hci_proto_connect_cfm(sco, status);
1231 hci_conn_del(sco);
1232 }
1233 }
1234
1235 hci_dev_unlock(hdev);
1236 }
1237
1238 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1239 {
1240 struct hci_cp_sniff_mode *cp;
1241 struct hci_conn *conn;
1242
1243 BT_DBG("%s status 0x%x", hdev->name, status);
1244
1245 if (!status)
1246 return;
1247
1248 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1249 if (!cp)
1250 return;
1251
1252 hci_dev_lock(hdev);
1253
1254 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1255 if (conn) {
1256 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1257
1258 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1259 hci_sco_setup(conn, status);
1260 }
1261
1262 hci_dev_unlock(hdev);
1263 }
1264
1265 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1266 {
1267 struct hci_cp_exit_sniff_mode *cp;
1268 struct hci_conn *conn;
1269
1270 BT_DBG("%s status 0x%x", hdev->name, status);
1271
1272 if (!status)
1273 return;
1274
1275 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1276 if (!cp)
1277 return;
1278
1279 hci_dev_lock(hdev);
1280
1281 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1282 if (conn) {
1283 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1284
1285 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1286 hci_sco_setup(conn, status);
1287 }
1288
1289 hci_dev_unlock(hdev);
1290 }
1291
1292 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1293 {
1294 struct hci_cp_le_create_conn *cp;
1295 struct hci_conn *conn;
1296
1297 BT_DBG("%s status 0x%x", hdev->name, status);
1298
1299 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1300 if (!cp)
1301 return;
1302
1303 hci_dev_lock(hdev);
1304
1305 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1306
1307 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1308 conn);
1309
1310 if (status) {
1311 if (conn && conn->state == BT_CONNECT) {
1312 conn->state = BT_CLOSED;
1313 hci_proto_connect_cfm(conn, status);
1314 hci_conn_del(conn);
1315 }
1316 } else {
1317 if (!conn) {
1318 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1319 if (conn) {
1320 conn->dst_type = cp->peer_addr_type;
1321 conn->out = 1;
1322 } else {
1323 BT_ERR("No memory for new connection");
1324 }
1325 }
1326 }
1327
1328 hci_dev_unlock(hdev);
1329 }
1330
1331 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1332 {
1333 BT_DBG("%s status 0x%x", hdev->name, status);
1334 }
1335
1336 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1337 {
1338 __u8 status = *((__u8 *) skb->data);
1339
1340 BT_DBG("%s status %d", hdev->name, status);
1341
1342 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
1343 test_bit(HCI_MGMT, &hdev->flags))
1344 mgmt_discovering(hdev->id, 0);
1345
1346 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1347
1348 hci_conn_check_pending(hdev);
1349 }
1350
1351 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1352 {
1353 struct inquiry_data data;
1354 struct inquiry_info *info = (void *) (skb->data + 1);
1355 int num_rsp = *((__u8 *) skb->data);
1356
1357 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1358
1359 if (!num_rsp)
1360 return;
1361
1362 hci_dev_lock(hdev);
1363
1364 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
1365
1366 if (test_bit(HCI_MGMT, &hdev->flags))
1367 mgmt_discovering(hdev->id, 1);
1368 }
1369
1370 for (; num_rsp; num_rsp--, info++) {
1371 bacpy(&data.bdaddr, &info->bdaddr);
1372 data.pscan_rep_mode = info->pscan_rep_mode;
1373 data.pscan_period_mode = info->pscan_period_mode;
1374 data.pscan_mode = info->pscan_mode;
1375 memcpy(data.dev_class, info->dev_class, 3);
1376 data.clock_offset = info->clock_offset;
1377 data.rssi = 0x00;
1378 data.ssp_mode = 0x00;
1379 hci_inquiry_cache_update(hdev, &data);
1380 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
1381 NULL);
1382 }
1383
1384 hci_dev_unlock(hdev);
1385 }
1386
1387 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1388 {
1389 struct hci_ev_conn_complete *ev = (void *) skb->data;
1390 struct hci_conn *conn;
1391
1392 BT_DBG("%s", hdev->name);
1393
1394 hci_dev_lock(hdev);
1395
1396 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1397 if (!conn) {
1398 if (ev->link_type != SCO_LINK)
1399 goto unlock;
1400
1401 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1402 if (!conn)
1403 goto unlock;
1404
1405 conn->type = SCO_LINK;
1406 }
1407
1408 if (!ev->status) {
1409 conn->handle = __le16_to_cpu(ev->handle);
1410
1411 if (conn->type == ACL_LINK) {
1412 conn->state = BT_CONFIG;
1413 hci_conn_hold(conn);
1414 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1415 mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
1416 } else
1417 conn->state = BT_CONNECTED;
1418
1419 hci_conn_hold_device(conn);
1420 hci_conn_add_sysfs(conn);
1421
1422 if (test_bit(HCI_AUTH, &hdev->flags))
1423 conn->link_mode |= HCI_LM_AUTH;
1424
1425 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1426 conn->link_mode |= HCI_LM_ENCRYPT;
1427
1428 /* Get remote features */
1429 if (conn->type == ACL_LINK) {
1430 struct hci_cp_read_remote_features cp;
1431 cp.handle = ev->handle;
1432 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1433 sizeof(cp), &cp);
1434 }
1435
1436 /* Set packet type for incoming connection */
1437 if (!conn->out && hdev->hci_ver < 3) {
1438 struct hci_cp_change_conn_ptype cp;
1439 cp.handle = ev->handle;
1440 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1441 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1442 sizeof(cp), &cp);
1443 }
1444 } else {
1445 conn->state = BT_CLOSED;
1446 if (conn->type == ACL_LINK)
1447 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1448 }
1449
1450 if (conn->type == ACL_LINK)
1451 hci_sco_setup(conn, ev->status);
1452
1453 if (ev->status) {
1454 hci_proto_connect_cfm(conn, ev->status);
1455 hci_conn_del(conn);
1456 } else if (ev->link_type != ACL_LINK)
1457 hci_proto_connect_cfm(conn, ev->status);
1458
1459 unlock:
1460 hci_dev_unlock(hdev);
1461
1462 hci_conn_check_pending(hdev);
1463 }
1464
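/* Incoming connection request.  Accept it when the link policy and the
 * registered protocols allow it and the peer is not blacklisted: ACL (or
 * SCO on a non-eSCO controller) requests are answered with Accept
 * Connection Request, eSCO requests with Accept Synchronous Connection
 * Request; anything else is rejected with reason 0x0f. */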
1465 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1466 {
1467 struct hci_ev_conn_request *ev = (void *) skb->data;
1468 int mask = hdev->link_mode;
1469
1470 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1471 batostr(&ev->bdaddr), ev->link_type);
1472
1473 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1474
1475 if ((mask & HCI_LM_ACCEPT) &&
1476 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1477 /* Connection accepted */
1478 struct inquiry_entry *ie;
1479 struct hci_conn *conn;
1480
1481 hci_dev_lock(hdev);
1482
1483 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1484 if (ie)
1485 memcpy(ie->data.dev_class, ev->dev_class, 3);
1486
1487 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1488 if (!conn) {
1489 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1490 if (!conn) {
1491 BT_ERR("No memory for new connection");
1492 hci_dev_unlock(hdev);
1493 return;
1494 }
1495 }
1496
1497 memcpy(conn->dev_class, ev->dev_class, 3);
1498 conn->state = BT_CONNECT;
1499
1500 hci_dev_unlock(hdev);
1501
1502 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1503 struct hci_cp_accept_conn_req cp;
1504
1505 bacpy(&cp.bdaddr, &ev->bdaddr);
1506
1507 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1508 cp.role = 0x00; /* Become master */
1509 else
1510 cp.role = 0x01; /* Remain slave */
1511
1512 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1513 sizeof(cp), &cp);
1514 } else {
1515 struct hci_cp_accept_sync_conn_req cp;
1516
1517 bacpy(&cp.bdaddr, &ev->bdaddr);
1518 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1519
1520 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1521 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1522 cp.max_latency = cpu_to_le16(0xffff);
1523 cp.content_format = cpu_to_le16(hdev->voice_setting);
1524 cp.retrans_effort = 0xff;
1525
1526 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1527 sizeof(cp), &cp);
1528 }
1529 } else {
1530 /* Connection rejected */
1531 struct hci_cp_reject_conn_req cp;
1532
1533 bacpy(&cp.bdaddr, &ev->bdaddr);
1534 cp.reason = 0x0f;
1535 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1536 }
1537 }
1538
1539 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1540 {
1541 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1542 struct hci_conn *conn;
1543
1544 BT_DBG("%s status %d", hdev->name, ev->status);
1545
1546 if (ev->status) {
1547 mgmt_disconnect_failed(hdev->id);
1548 return;
1549 }
1550
1551 hci_dev_lock(hdev);
1552
1553 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1554 if (!conn)
1555 goto unlock;
1556
1557 conn->state = BT_CLOSED;
1558
1559 if (conn->type == ACL_LINK || conn->type == LE_LINK)
1560 mgmt_disconnected(hdev->id, &conn->dst);
1561
1562 hci_proto_disconn_cfm(conn, ev->reason);
1563 hci_conn_del(conn);
1564
1565 unlock:
1566 hci_dev_unlock(hdev);
1567 }
1568
1569 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1570 {
1571 struct hci_ev_auth_complete *ev = (void *) skb->data;
1572 struct hci_conn *conn;
1573
1574 BT_DBG("%s status %d", hdev->name, ev->status);
1575
1576 hci_dev_lock(hdev);
1577
1578 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1579 if (!conn)
1580 goto unlock;
1581
1582 if (!ev->status) {
1583 if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
1584 test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
1585 BT_INFO("re-auth of legacy device is not possible.");
1586 } else {
1587 conn->link_mode |= HCI_LM_AUTH;
1588 conn->sec_level = conn->pending_sec_level;
1589 }
1590 } else {
1591 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1592 }
1593
1594 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1595 clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
1596
1597 if (conn->state == BT_CONFIG) {
1598 if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
1599 struct hci_cp_set_conn_encrypt cp;
1600 cp.handle = ev->handle;
1601 cp.encrypt = 0x01;
1602 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1603 &cp);
1604 } else {
1605 conn->state = BT_CONNECTED;
1606 hci_proto_connect_cfm(conn, ev->status);
1607 hci_conn_put(conn);
1608 }
1609 } else {
1610 hci_auth_cfm(conn, ev->status);
1611
1612 hci_conn_hold(conn);
1613 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1614 hci_conn_put(conn);
1615 }
1616
1617 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1618 if (!ev->status) {
1619 struct hci_cp_set_conn_encrypt cp;
1620 cp.handle = ev->handle;
1621 cp.encrypt = 0x01;
1622 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1623 &cp);
1624 } else {
1625 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1626 hci_encrypt_cfm(conn, ev->status, 0x00);
1627 }
1628 }
1629
1630 unlock:
1631 hci_dev_unlock(hdev);
1632 }
1633
1634 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1635 {
1636 struct hci_ev_remote_name *ev = (void *) skb->data;
1637 struct hci_conn *conn;
1638
1639 BT_DBG("%s", hdev->name);
1640
1641 hci_conn_check_pending(hdev);
1642
1643 hci_dev_lock(hdev);
1644
1645 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1646 mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
1647
1648 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1649 if (!conn)
1650 goto unlock;
1651
1652 if (!hci_outgoing_auth_needed(hdev, conn))
1653 goto unlock;
1654
1655 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1656 struct hci_cp_auth_requested cp;
1657 cp.handle = __cpu_to_le16(conn->handle);
1658 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1659 }
1660
1661 unlock:
1662 hci_dev_unlock(hdev);
1663 }
1664
1665 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1666 {
1667 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1668 struct hci_conn *conn;
1669
1670 BT_DBG("%s status %d", hdev->name, ev->status);
1671
1672 hci_dev_lock(hdev);
1673
1674 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1675 if (conn) {
1676 if (!ev->status) {
1677 if (ev->encrypt) {
1678 /* Encryption implies authentication */
1679 conn->link_mode |= HCI_LM_AUTH;
1680 conn->link_mode |= HCI_LM_ENCRYPT;
1681 conn->sec_level = conn->pending_sec_level;
1682 } else
1683 conn->link_mode &= ~HCI_LM_ENCRYPT;
1684 }
1685
1686 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1687
1688 if (conn->state == BT_CONFIG) {
1689 if (!ev->status)
1690 conn->state = BT_CONNECTED;
1691
1692 hci_proto_connect_cfm(conn, ev->status);
1693 hci_conn_put(conn);
1694 } else
1695 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1696 }
1697
1698 hci_dev_unlock(hdev);
1699 }
1700
1701 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1702 {
1703 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1704 struct hci_conn *conn;
1705
1706 BT_DBG("%s status %d", hdev->name, ev->status);
1707
1708 hci_dev_lock(hdev);
1709
1710 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1711 if (conn) {
1712 if (!ev->status)
1713 conn->link_mode |= HCI_LM_SECURE;
1714
1715 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1716
1717 hci_key_change_cfm(conn, ev->status);
1718 }
1719
1720 hci_dev_unlock(hdev);
1721 }
1722
1723 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1724 {
1725 struct hci_ev_remote_features *ev = (void *) skb->data;
1726 struct hci_conn *conn;
1727
1728 BT_DBG("%s status %d", hdev->name, ev->status);
1729
1730 hci_dev_lock(hdev);
1731
1732 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1733 if (!conn)
1734 goto unlock;
1735
1736 if (!ev->status)
1737 memcpy(conn->features, ev->features, 8);
1738
1739 if (conn->state != BT_CONFIG)
1740 goto unlock;
1741
1742 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1743 struct hci_cp_read_remote_ext_features cp;
1744 cp.handle = ev->handle;
1745 cp.page = 0x01;
1746 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1747 sizeof(cp), &cp);
1748 goto unlock;
1749 }
1750
1751 if (!ev->status) {
1752 struct hci_cp_remote_name_req cp;
1753 memset(&cp, 0, sizeof(cp));
1754 bacpy(&cp.bdaddr, &conn->dst);
1755 cp.pscan_rep_mode = 0x02;
1756 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1757 }
1758
1759 if (!hci_outgoing_auth_needed(hdev, conn)) {
1760 conn->state = BT_CONNECTED;
1761 hci_proto_connect_cfm(conn, ev->status);
1762 hci_conn_put(conn);
1763 }
1764
1765 unlock:
1766 hci_dev_unlock(hdev);
1767 }
1768
1769 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1770 {
1771 BT_DBG("%s", hdev->name);
1772 }
1773
1774 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1775 {
1776 BT_DBG("%s", hdev->name);
1777 }
1778
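/* Dispatch Command Complete events to the per-opcode handlers above,
 * then kick the command queue again if the controller reported free
 * command slots (ev->ncmd). */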
1779 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1780 {
1781 struct hci_ev_cmd_complete *ev = (void *) skb->data;
1782 __u16 opcode;
1783
1784 skb_pull(skb, sizeof(*ev));
1785
1786 opcode = __le16_to_cpu(ev->opcode);
1787
1788 switch (opcode) {
1789 case HCI_OP_INQUIRY_CANCEL:
1790 hci_cc_inquiry_cancel(hdev, skb);
1791 break;
1792
1793 case HCI_OP_EXIT_PERIODIC_INQ:
1794 hci_cc_exit_periodic_inq(hdev, skb);
1795 break;
1796
1797 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1798 hci_cc_remote_name_req_cancel(hdev, skb);
1799 break;
1800
1801 case HCI_OP_ROLE_DISCOVERY:
1802 hci_cc_role_discovery(hdev, skb);
1803 break;
1804
1805 case HCI_OP_READ_LINK_POLICY:
1806 hci_cc_read_link_policy(hdev, skb);
1807 break;
1808
1809 case HCI_OP_WRITE_LINK_POLICY:
1810 hci_cc_write_link_policy(hdev, skb);
1811 break;
1812
1813 case HCI_OP_READ_DEF_LINK_POLICY:
1814 hci_cc_read_def_link_policy(hdev, skb);
1815 break;
1816
1817 case HCI_OP_WRITE_DEF_LINK_POLICY:
1818 hci_cc_write_def_link_policy(hdev, skb);
1819 break;
1820
1821 case HCI_OP_RESET:
1822 hci_cc_reset(hdev, skb);
1823 break;
1824
1825 case HCI_OP_WRITE_LOCAL_NAME:
1826 hci_cc_write_local_name(hdev, skb);
1827 break;
1828
1829 case HCI_OP_READ_LOCAL_NAME:
1830 hci_cc_read_local_name(hdev, skb);
1831 break;
1832
1833 case HCI_OP_WRITE_AUTH_ENABLE:
1834 hci_cc_write_auth_enable(hdev, skb);
1835 break;
1836
1837 case HCI_OP_WRITE_ENCRYPT_MODE:
1838 hci_cc_write_encrypt_mode(hdev, skb);
1839 break;
1840
1841 case HCI_OP_WRITE_SCAN_ENABLE:
1842 hci_cc_write_scan_enable(hdev, skb);
1843 break;
1844
1845 case HCI_OP_READ_CLASS_OF_DEV:
1846 hci_cc_read_class_of_dev(hdev, skb);
1847 break;
1848
1849 case HCI_OP_WRITE_CLASS_OF_DEV:
1850 hci_cc_write_class_of_dev(hdev, skb);
1851 break;
1852
1853 case HCI_OP_READ_VOICE_SETTING:
1854 hci_cc_read_voice_setting(hdev, skb);
1855 break;
1856
1857 case HCI_OP_WRITE_VOICE_SETTING:
1858 hci_cc_write_voice_setting(hdev, skb);
1859 break;
1860
1861 case HCI_OP_HOST_BUFFER_SIZE:
1862 hci_cc_host_buffer_size(hdev, skb);
1863 break;
1864
1865 case HCI_OP_READ_SSP_MODE:
1866 hci_cc_read_ssp_mode(hdev, skb);
1867 break;
1868
1869 case HCI_OP_WRITE_SSP_MODE:
1870 hci_cc_write_ssp_mode(hdev, skb);
1871 break;
1872
1873 case HCI_OP_READ_LOCAL_VERSION:
1874 hci_cc_read_local_version(hdev, skb);
1875 break;
1876
1877 case HCI_OP_READ_LOCAL_COMMANDS:
1878 hci_cc_read_local_commands(hdev, skb);
1879 break;
1880
1881 case HCI_OP_READ_LOCAL_FEATURES:
1882 hci_cc_read_local_features(hdev, skb);
1883 break;
1884
1885 case HCI_OP_READ_LOCAL_EXT_FEATURES:
1886 hci_cc_read_local_ext_features(hdev, skb);
1887 break;
1888
1889 case HCI_OP_READ_BUFFER_SIZE:
1890 hci_cc_read_buffer_size(hdev, skb);
1891 break;
1892
1893 case HCI_OP_READ_BD_ADDR:
1894 hci_cc_read_bd_addr(hdev, skb);
1895 break;
1896
1897 case HCI_OP_WRITE_CA_TIMEOUT:
1898 hci_cc_write_ca_timeout(hdev, skb);
1899 break;
1900
1901 case HCI_OP_DELETE_STORED_LINK_KEY:
1902 hci_cc_delete_stored_link_key(hdev, skb);
1903 break;
1904
1905 case HCI_OP_SET_EVENT_MASK:
1906 hci_cc_set_event_mask(hdev, skb);
1907 break;
1908
1909 case HCI_OP_WRITE_INQUIRY_MODE:
1910 hci_cc_write_inquiry_mode(hdev, skb);
1911 break;
1912
1913 case HCI_OP_READ_INQ_RSP_TX_POWER:
1914 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1915 break;
1916
1917 case HCI_OP_SET_EVENT_FLT:
1918 hci_cc_set_event_flt(hdev, skb);
1919 break;
1920
1921 case HCI_OP_PIN_CODE_REPLY:
1922 hci_cc_pin_code_reply(hdev, skb);
1923 break;
1924
1925 case HCI_OP_PIN_CODE_NEG_REPLY:
1926 hci_cc_pin_code_neg_reply(hdev, skb);
1927 break;
1928
1929 case HCI_OP_READ_LOCAL_OOB_DATA:
1930 hci_cc_read_local_oob_data_reply(hdev, skb);
1931 break;
1932
1933 case HCI_OP_LE_READ_BUFFER_SIZE:
1934 hci_cc_le_read_buffer_size(hdev, skb);
1935 break;
1936
1937 case HCI_OP_USER_CONFIRM_REPLY:
1938 hci_cc_user_confirm_reply(hdev, skb);
1939 break;
1940
1941 case HCI_OP_USER_CONFIRM_NEG_REPLY:
1942 hci_cc_user_confirm_neg_reply(hdev, skb);
1943 break;
1944
1945 case HCI_OP_LE_SET_SCAN_ENABLE:
1946 hci_cc_le_set_scan_enable(hdev, skb);
1947 break;
1948
1949 case HCI_OP_LE_LTK_REPLY:
1950 hci_cc_le_ltk_reply(hdev, skb);
1951 break;
1952
1953 case HCI_OP_LE_LTK_NEG_REPLY:
1954 hci_cc_le_ltk_neg_reply(hdev, skb);
1955 break;
1956
1957 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
1958 hci_cc_write_le_host_supported(hdev, skb);
1959 break;
1960
1961 default:
1962 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1963 break;
1964 }
1965
1966 if (ev->opcode != HCI_OP_NOP)
1967 del_timer(&hdev->cmd_timer);
1968
1969 if (ev->ncmd) {
1970 atomic_set(&hdev->cmd_cnt, 1);
1971 if (!skb_queue_empty(&hdev->cmd_q))
1972 tasklet_schedule(&hdev->cmd_task);
1973 }
1974 }
1975
1976 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1977 {
1978 struct hci_ev_cmd_status *ev = (void *) skb->data;
1979 __u16 opcode;
1980
1981 skb_pull(skb, sizeof(*ev));
1982
1983 opcode = __le16_to_cpu(ev->opcode);
1984
1985 switch (opcode) {
1986 case HCI_OP_INQUIRY:
1987 hci_cs_inquiry(hdev, ev->status);
1988 break;
1989
1990 case HCI_OP_CREATE_CONN:
1991 hci_cs_create_conn(hdev, ev->status);
1992 break;
1993
1994 case HCI_OP_ADD_SCO:
1995 hci_cs_add_sco(hdev, ev->status);
1996 break;
1997
1998 case HCI_OP_AUTH_REQUESTED:
1999 hci_cs_auth_requested(hdev, ev->status);
2000 break;
2001
2002 case HCI_OP_SET_CONN_ENCRYPT:
2003 hci_cs_set_conn_encrypt(hdev, ev->status);
2004 break;
2005
2006 case HCI_OP_REMOTE_NAME_REQ:
2007 hci_cs_remote_name_req(hdev, ev->status);
2008 break;
2009
2010 case HCI_OP_READ_REMOTE_FEATURES:
2011 hci_cs_read_remote_features(hdev, ev->status);
2012 break;
2013
2014 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2015 hci_cs_read_remote_ext_features(hdev, ev->status);
2016 break;
2017
2018 case HCI_OP_SETUP_SYNC_CONN:
2019 hci_cs_setup_sync_conn(hdev, ev->status);
2020 break;
2021
2022 case HCI_OP_SNIFF_MODE:
2023 hci_cs_sniff_mode(hdev, ev->status);
2024 break;
2025
2026 case HCI_OP_EXIT_SNIFF_MODE:
2027 hci_cs_exit_sniff_mode(hdev, ev->status);
2028 break;
2029
2030 case HCI_OP_DISCONNECT:
2031 if (ev->status != 0)
2032 mgmt_disconnect_failed(hdev->id);
2033 break;
2034
2035 case HCI_OP_LE_CREATE_CONN:
2036 hci_cs_le_create_conn(hdev, ev->status);
2037 break;
2038
2039 case HCI_OP_LE_START_ENC:
2040 hci_cs_le_start_enc(hdev, ev->status);
2041 break;
2042
2043 default:
2044 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2045 break;
2046 }
2047
2048 if (ev->opcode != HCI_OP_NOP)
2049 del_timer(&hdev->cmd_timer);
2050
2051 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2052 atomic_set(&hdev->cmd_cnt, 1);
2053 if (!skb_queue_empty(&hdev->cmd_q))
2054 tasklet_schedule(&hdev->cmd_task);
2055 }
2056 }
2057
2058 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2059 {
2060 struct hci_ev_role_change *ev = (void *) skb->data;
2061 struct hci_conn *conn;
2062
2063 BT_DBG("%s status %d", hdev->name, ev->status);
2064
2065 hci_dev_lock(hdev);
2066
2067 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2068 if (conn) {
2069 if (!ev->status) {
2070 if (ev->role)
2071 conn->link_mode &= ~HCI_LM_MASTER;
2072 else
2073 conn->link_mode |= HCI_LM_MASTER;
2074 }
2075
2076 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
2077
2078 hci_role_switch_cfm(conn, ev->status, ev->role);
2079 }
2080
2081 hci_dev_unlock(hdev);
2082 }
2083
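/* Number of Completed Packets: return the per-connection credits to the
 * ACL, LE or SCO counters (LE traffic shares the ACL pool when the
 * controller reports no dedicated LE buffers) and reschedule the TX
 * tasklet. */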
2084 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2085 {
2086 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2087 __le16 *ptr;
2088 int i;
2089
2090 skb_pull(skb, sizeof(*ev));
2091
2092 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2093
2094 if (skb->len < ev->num_hndl * 4) {
2095 BT_DBG("%s bad parameters", hdev->name);
2096 return;
2097 }
2098
2099 tasklet_disable(&hdev->tx_task);
2100
2101 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
2102 struct hci_conn *conn;
2103 __u16 handle, count;
2104
2105 handle = get_unaligned_le16(ptr++);
2106 count = get_unaligned_le16(ptr++);
2107
2108 conn = hci_conn_hash_lookup_handle(hdev, handle);
2109 if (conn) {
2110 conn->sent -= count;
2111
2112 if (conn->type == ACL_LINK) {
2113 hdev->acl_cnt += count;
2114 if (hdev->acl_cnt > hdev->acl_pkts)
2115 hdev->acl_cnt = hdev->acl_pkts;
2116 } else if (conn->type == LE_LINK) {
2117 if (hdev->le_pkts) {
2118 hdev->le_cnt += count;
2119 if (hdev->le_cnt > hdev->le_pkts)
2120 hdev->le_cnt = hdev->le_pkts;
2121 } else {
2122 hdev->acl_cnt += count;
2123 if (hdev->acl_cnt > hdev->acl_pkts)
2124 hdev->acl_cnt = hdev->acl_pkts;
2125 }
2126 } else {
2127 hdev->sco_cnt += count;
2128 if (hdev->sco_cnt > hdev->sco_pkts)
2129 hdev->sco_cnt = hdev->sco_pkts;
2130 }
2131 }
2132 }
2133
2134 tasklet_schedule(&hdev->tx_task);
2135
2136 tasklet_enable(&hdev->tx_task);
2137 }
2138
2139 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2140 {
2141 struct hci_ev_mode_change *ev = (void *) skb->data;
2142 struct hci_conn *conn;
2143
2144 BT_DBG("%s status %d", hdev->name, ev->status);
2145
2146 hci_dev_lock(hdev);
2147
2148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2149 if (conn) {
2150 conn->mode = ev->mode;
2151 conn->interval = __le16_to_cpu(ev->interval);
2152
2153 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
2154 if (conn->mode == HCI_CM_ACTIVE)
2155 conn->power_save = 1;
2156 else
2157 conn->power_save = 0;
2158 }
2159
2160 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
2161 hci_sco_setup(conn, ev->status);
2162 }
2163
2164 hci_dev_unlock(hdev);
2165 }
2166
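/* PIN Code Request event: send a negative reply if the device is not
 * pairable, otherwise forward the request to user space through the
 * management interface. */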
2167 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2168 {
2169 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2170 struct hci_conn *conn;
2171
2172 BT_DBG("%s", hdev->name);
2173
2174 hci_dev_lock(hdev);
2175
2176 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2177 if (!conn)
2178 goto unlock;
2179
2180 if (conn->state == BT_CONNECTED) {
2181 hci_conn_hold(conn);
2182 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2183 hci_conn_put(conn);
2184 }
2185
2186 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2187 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2188 sizeof(ev->bdaddr), &ev->bdaddr);
2189 else if (test_bit(HCI_MGMT, &hdev->flags)) {
2190 u8 secure;
2191
2192 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2193 secure = 1;
2194 else
2195 secure = 0;
2196
2197 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
2198 }
2199
2200 unlock:
2201 hci_dev_unlock(hdev);
2202 }
2203
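/* Link Key Request event: look up a stored link key for the remote
 * address and reply with it, rejecting debug keys and keys too weak
 * for the pending security level; send a negative reply if no
 * suitable key is found. */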
2204 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2205 {
2206 struct hci_ev_link_key_req *ev = (void *) skb->data;
2207 struct hci_cp_link_key_reply cp;
2208 struct hci_conn *conn;
2209 struct link_key *key;
2210
2211 BT_DBG("%s", hdev->name);
2212
2213 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2214 return;
2215
2216 hci_dev_lock(hdev);
2217
2218 key = hci_find_link_key(hdev, &ev->bdaddr);
2219 if (!key) {
2220 BT_DBG("%s link key not found for %s", hdev->name,
2221 batostr(&ev->bdaddr));
2222 goto not_found;
2223 }
2224
2225 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2226 batostr(&ev->bdaddr));
2227
2228 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2229 key->type == HCI_LK_DEBUG_COMBINATION) {
2230 BT_DBG("%s ignoring debug key", hdev->name);
2231 goto not_found;
2232 }
2233
2234 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2235 if (conn) {
2236 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2237 conn->auth_type != 0xff &&
2238 (conn->auth_type & 0x01)) {
2239 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2240 goto not_found;
2241 }
2242
2243 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2244 conn->pending_sec_level == BT_SECURITY_HIGH) {
2245 			BT_DBG("%s ignoring key unauthenticated for high security",
2246 							hdev->name);
2247 goto not_found;
2248 }
2249
2250 conn->key_type = key->type;
2251 conn->pin_length = key->pin_len;
2252 }
2253
2254 bacpy(&cp.bdaddr, &ev->bdaddr);
2255 memcpy(cp.link_key, key->val, 16);
2256
2257 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2258
2259 hci_dev_unlock(hdev);
2260
2261 return;
2262
2263 not_found:
2264 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2265 hci_dev_unlock(hdev);
2266 }
2267
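/* Link Key Notification event: a new link key was created during
 * pairing.  Record the key type on the connection and, if link key
 * storage is enabled, persist the key. */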
2268 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2269 {
2270 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2271 struct hci_conn *conn;
2272 u8 pin_len = 0;
2273
2274 BT_DBG("%s", hdev->name);
2275
2276 hci_dev_lock(hdev);
2277
2278 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2279 if (conn) {
2280 hci_conn_hold(conn);
2281 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2282 pin_len = conn->pin_length;
2283
2284 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2285 conn->key_type = ev->key_type;
2286
2287 hci_conn_put(conn);
2288 }
2289
2290 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2291 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2292 ev->key_type, pin_len);
2293
2294 hci_dev_unlock(hdev);
2295 }
2296
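/* Read Clock Offset Complete event: cache the reported clock offset
 * in the inquiry cache entry for the remote device. */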
2297 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2298 {
2299 struct hci_ev_clock_offset *ev = (void *) skb->data;
2300 struct hci_conn *conn;
2301
2302 BT_DBG("%s status %d", hdev->name, ev->status);
2303
2304 hci_dev_lock(hdev);
2305
2306 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2307 if (conn && !ev->status) {
2308 struct inquiry_entry *ie;
2309
2310 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2311 if (ie) {
2312 ie->data.clock_offset = ev->clock_offset;
2313 ie->timestamp = jiffies;
2314 }
2315 }
2316
2317 hci_dev_unlock(hdev);
2318 }
2319
2320 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2321 {
2322 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2323 struct hci_conn *conn;
2324
2325 BT_DBG("%s status %d", hdev->name, ev->status);
2326
2327 hci_dev_lock(hdev);
2328
2329 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2330 if (conn && !ev->status)
2331 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2332
2333 hci_dev_unlock(hdev);
2334 }
2335
2336 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2337 {
2338 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2339 struct inquiry_entry *ie;
2340
2341 BT_DBG("%s", hdev->name);
2342
2343 hci_dev_lock(hdev);
2344
2345 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2346 if (ie) {
2347 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2348 ie->timestamp = jiffies;
2349 }
2350
2351 hci_dev_unlock(hdev);
2352 }
2353
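/* Inquiry Result with RSSI event: parse the response entries (with or
 * without the page scan mode field, depending on the reported entry
 * size), update the inquiry cache and report each found device to the
 * management interface. */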
2354 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2355 {
2356 struct inquiry_data data;
2357 int num_rsp = *((__u8 *) skb->data);
2358
2359 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2360
2361 if (!num_rsp)
2362 return;
2363
2364 hci_dev_lock(hdev);
2365
2366 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2367
2368 if (test_bit(HCI_MGMT, &hdev->flags))
2369 mgmt_discovering(hdev->id, 1);
2370 }
2371
2372 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2373 struct inquiry_info_with_rssi_and_pscan_mode *info;
2374 info = (void *) (skb->data + 1);
2375
2376 for (; num_rsp; num_rsp--, info++) {
2377 bacpy(&data.bdaddr, &info->bdaddr);
2378 data.pscan_rep_mode = info->pscan_rep_mode;
2379 data.pscan_period_mode = info->pscan_period_mode;
2380 data.pscan_mode = info->pscan_mode;
2381 memcpy(data.dev_class, info->dev_class, 3);
2382 data.clock_offset = info->clock_offset;
2383 data.rssi = info->rssi;
2384 data.ssp_mode = 0x00;
2385 hci_inquiry_cache_update(hdev, &data);
2386 mgmt_device_found(hdev->id, &info->bdaddr,
2387 info->dev_class, info->rssi,
2388 NULL);
2389 }
2390 } else {
2391 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2392
2393 for (; num_rsp; num_rsp--, info++) {
2394 bacpy(&data.bdaddr, &info->bdaddr);
2395 data.pscan_rep_mode = info->pscan_rep_mode;
2396 data.pscan_period_mode = info->pscan_period_mode;
2397 data.pscan_mode = 0x00;
2398 memcpy(data.dev_class, info->dev_class, 3);
2399 data.clock_offset = info->clock_offset;
2400 data.rssi = info->rssi;
2401 data.ssp_mode = 0x00;
2402 hci_inquiry_cache_update(hdev, &data);
2403 mgmt_device_found(hdev->id, &info->bdaddr,
2404 info->dev_class, info->rssi,
2405 NULL);
2406 }
2407 }
2408
2409 hci_dev_unlock(hdev);
2410 }
2411
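/* Read Remote Extended Features Complete event: page 1 carries the
 * remote host's Secure Simple Pairing support bit, which is cached on
 * the connection and in the inquiry cache.  While the connection is
 * still being configured, continue with a remote name request and, if
 * no outgoing authentication is needed, mark the connection complete. */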
2412 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2413 {
2414 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2415 struct hci_conn *conn;
2416
2417 BT_DBG("%s", hdev->name);
2418
2419 hci_dev_lock(hdev);
2420
2421 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2422 if (!conn)
2423 goto unlock;
2424
2425 if (!ev->status && ev->page == 0x01) {
2426 struct inquiry_entry *ie;
2427
2428 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2429 if (ie)
2430 ie->data.ssp_mode = (ev->features[0] & 0x01);
2431
2432 conn->ssp_mode = (ev->features[0] & 0x01);
2433 }
2434
2435 if (conn->state != BT_CONFIG)
2436 goto unlock;
2437
2438 if (!ev->status) {
2439 struct hci_cp_remote_name_req cp;
2440 memset(&cp, 0, sizeof(cp));
2441 bacpy(&cp.bdaddr, &conn->dst);
2442 cp.pscan_rep_mode = 0x02;
2443 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2444 }
2445
2446 if (!hci_outgoing_auth_needed(hdev, conn)) {
2447 conn->state = BT_CONNECTED;
2448 hci_proto_connect_cfm(conn, ev->status);
2449 hci_conn_put(conn);
2450 }
2451
2452 unlock:
2453 hci_dev_unlock(hdev);
2454 }
2455
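/* Synchronous Connection Complete event: finish SCO/eSCO setup.  For
 * failure codes that indicate an unsupported parameter set, retry once
 * with a less demanding packet type before giving up. */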
2456 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2457 {
2458 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2459 struct hci_conn *conn;
2460
2461 BT_DBG("%s status %d", hdev->name, ev->status);
2462
2463 hci_dev_lock(hdev);
2464
2465 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2466 if (!conn) {
2467 if (ev->link_type == ESCO_LINK)
2468 goto unlock;
2469
2470 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2471 if (!conn)
2472 goto unlock;
2473
2474 conn->type = SCO_LINK;
2475 }
2476
2477 switch (ev->status) {
2478 case 0x00:
2479 conn->handle = __le16_to_cpu(ev->handle);
2480 conn->state = BT_CONNECTED;
2481
2482 hci_conn_hold_device(conn);
2483 hci_conn_add_sysfs(conn);
2484 break;
2485
2486 case 0x11: /* Unsupported Feature or Parameter Value */
2487 case 0x1c: /* SCO interval rejected */
2488 case 0x1a: /* Unsupported Remote Feature */
2489 case 0x1f: /* Unspecified error */
2490 if (conn->out && conn->attempt < 2) {
2491 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2492 (hdev->esco_type & EDR_ESCO_MASK);
2493 hci_setup_sync(conn, conn->link->handle);
2494 goto unlock;
2495 }
2496 /* fall through */
2497
2498 default:
2499 conn->state = BT_CLOSED;
2500 break;
2501 }
2502
2503 hci_proto_connect_cfm(conn, ev->status);
2504 if (ev->status)
2505 hci_conn_del(conn);
2506
2507 unlock:
2508 hci_dev_unlock(hdev);
2509 }
2510
2511 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2512 {
2513 BT_DBG("%s", hdev->name);
2514 }
2515
2516 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2517 {
2518 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2519
2520 BT_DBG("%s status %d", hdev->name, ev->status);
2521 }
2522
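/* Extended Inquiry Result event: like the RSSI variant but with EIR
 * data attached; update the inquiry cache and pass the EIR data along
 * when reporting the device to the management interface. */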
2523 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2524 {
2525 struct inquiry_data data;
2526 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2527 int num_rsp = *((__u8 *) skb->data);
2528
2529 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2530
2531 if (!num_rsp)
2532 return;
2533
2534 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2535
2536 if (test_bit(HCI_MGMT, &hdev->flags))
2537 mgmt_discovering(hdev->id, 1);
2538 }
2539
2540 hci_dev_lock(hdev);
2541
2542 for (; num_rsp; num_rsp--, info++) {
2543 bacpy(&data.bdaddr, &info->bdaddr);
2544 data.pscan_rep_mode = info->pscan_rep_mode;
2545 data.pscan_period_mode = info->pscan_period_mode;
2546 data.pscan_mode = 0x00;
2547 memcpy(data.dev_class, info->dev_class, 3);
2548 data.clock_offset = info->clock_offset;
2549 data.rssi = info->rssi;
2550 data.ssp_mode = 0x01;
2551 hci_inquiry_cache_update(hdev, &data);
2552 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
2553 info->rssi, info->data);
2554 }
2555
2556 hci_dev_unlock(hdev);
2557 }
2558
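/* Derive the authentication requirements to use in an IO Capability
 * Reply, taking the remote side's stated requirements into account. */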
2559 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2560 {
2561 	/* If the remote requests dedicated bonding, follow that lead */
2562 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2563 /* If both remote and local IO capabilities allow MITM
2564 * protection then require it, otherwise don't */
2565 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2566 return 0x02;
2567 else
2568 return 0x03;
2569 }
2570
2571 	/* If the remote requests no bonding, follow that lead */
2572 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2573 return conn->remote_auth | (conn->auth_type & 0x01);
2574
2575 return conn->auth_type;
2576 }
2577
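/* IO Capability Request event: if pairing is allowed, reply with our
 * IO capability, derived authentication requirements and OOB data
 * presence; otherwise send a negative reply with reason "pairing not
 * allowed". */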
2578 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2579 {
2580 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2581 struct hci_conn *conn;
2582
2583 BT_DBG("%s", hdev->name);
2584
2585 hci_dev_lock(hdev);
2586
2587 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2588 if (!conn)
2589 goto unlock;
2590
2591 hci_conn_hold(conn);
2592
2593 if (!test_bit(HCI_MGMT, &hdev->flags))
2594 goto unlock;
2595
2596 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2597 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2598 struct hci_cp_io_capability_reply cp;
2599
2600 bacpy(&cp.bdaddr, &ev->bdaddr);
2601 cp.capability = conn->io_capability;
2602 conn->auth_type = hci_get_auth_req(conn);
2603 cp.authentication = conn->auth_type;
2604
2605 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2606 hci_find_remote_oob_data(hdev, &conn->dst))
2607 cp.oob_data = 0x01;
2608 else
2609 cp.oob_data = 0x00;
2610
2611 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2612 sizeof(cp), &cp);
2613 } else {
2614 struct hci_cp_io_capability_neg_reply cp;
2615
2616 bacpy(&cp.bdaddr, &ev->bdaddr);
2617 cp.reason = 0x18; /* Pairing not allowed */
2618
2619 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2620 sizeof(cp), &cp);
2621 }
2622
2623 unlock:
2624 hci_dev_unlock(hdev);
2625 }
2626
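/* IO Capability Response event: cache the remote side's capability,
 * OOB data presence and authentication requirements on the connection
 * for later pairing decisions. */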
2627 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 {
2629 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2630 struct hci_conn *conn;
2631
2632 BT_DBG("%s", hdev->name);
2633
2634 hci_dev_lock(hdev);
2635
2636 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2637 if (!conn)
2638 goto unlock;
2639
2640 conn->remote_cap = ev->capability;
2641 conn->remote_oob = ev->oob_data;
2642 conn->remote_auth = ev->authentication;
2643
2644 unlock:
2645 hci_dev_unlock(hdev);
2646 }
2647
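/* User Confirmation Request event: auto-accept the numeric comparison
 * when neither side requires MITM protection (optionally after a
 * configurable delay), otherwise hand the request to user space for
 * confirmation. */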
2648 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2649 struct sk_buff *skb)
2650 {
2651 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2652 int loc_mitm, rem_mitm, confirm_hint = 0;
2653 struct hci_conn *conn;
2654
2655 BT_DBG("%s", hdev->name);
2656
2657 hci_dev_lock(hdev);
2658
2659 if (!test_bit(HCI_MGMT, &hdev->flags))
2660 goto unlock;
2661
2662 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2663 if (!conn)
2664 goto unlock;
2665
2666 loc_mitm = (conn->auth_type & 0x01);
2667 rem_mitm = (conn->remote_auth & 0x01);
2668
2669 	/* If we require MITM but the remote device can't provide that
2670 	 * (it has NoInputNoOutput), then reject the confirmation
2671 	 * request. The only exception is when we're dedicated bonding
2672 	 * initiators (connect_cfm_cb set), since then we always have the
2673 	 * MITM bit set. */
2674 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
2675 BT_DBG("Rejecting request: remote device can't provide MITM");
2676 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
2677 sizeof(ev->bdaddr), &ev->bdaddr);
2678 goto unlock;
2679 }
2680
2681 	/* If neither side requires MITM protection, auto-accept */
2682 if ((!loc_mitm || conn->remote_cap == 0x03) &&
2683 (!rem_mitm || conn->io_capability == 0x03)) {
2684
2685 		/* If we're not the initiators, request authorization to
2686 * proceed from user space (mgmt_user_confirm with
2687 * confirm_hint set to 1). */
2688 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
2689 BT_DBG("Confirming auto-accept as acceptor");
2690 confirm_hint = 1;
2691 goto confirm;
2692 }
2693
2694 BT_DBG("Auto-accept of user confirmation with %ums delay",
2695 hdev->auto_accept_delay);
2696
2697 if (hdev->auto_accept_delay > 0) {
2698 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
2699 mod_timer(&conn->auto_accept_timer, jiffies + delay);
2700 goto unlock;
2701 }
2702
2703 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
2704 sizeof(ev->bdaddr), &ev->bdaddr);
2705 goto unlock;
2706 }
2707
2708 confirm:
2709 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
2710 confirm_hint);
2711
2712 unlock:
2713 hci_dev_unlock(hdev);
2714 }
2715
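/* Simple Pairing Complete event: report pairing failures to the
 * management interface unless a pending authentication request will
 * already generate an auth_failed event. */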
2716 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2717 {
2718 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2719 struct hci_conn *conn;
2720
2721 BT_DBG("%s", hdev->name);
2722
2723 hci_dev_lock(hdev);
2724
2725 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2726 if (!conn)
2727 goto unlock;
2728
2729 	/* To avoid sending duplicate auth_failed events to user space we
2730 	 * check the HCI_CONN_AUTH_PEND flag, which will be set if we
2731 	 * initiated the authentication. A traditional auth_complete event
2732 	 * is always produced when we are the initiator and is also mapped
2733 	 * to the mgmt_auth_failed event. */
2734 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2735 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
2736
2737 hci_conn_put(conn);
2738
2739 unlock:
2740 hci_dev_unlock(hdev);
2741 }
2742
2743 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2744 {
2745 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2746 struct inquiry_entry *ie;
2747
2748 BT_DBG("%s", hdev->name);
2749
2750 hci_dev_lock(hdev);
2751
2752 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2753 if (ie)
2754 ie->data.ssp_mode = (ev->features[0] & 0x01);
2755
2756 hci_dev_unlock(hdev);
2757 }
2758
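/* Remote OOB Data Request event: reply with the stored out-of-band
 * hash and randomizer for the remote device if we have them, otherwise
 * send a negative reply. */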
2759 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2760 struct sk_buff *skb)
2761 {
2762 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2763 struct oob_data *data;
2764
2765 BT_DBG("%s", hdev->name);
2766
2767 hci_dev_lock(hdev);
2768
2769 if (!test_bit(HCI_MGMT, &hdev->flags))
2770 goto unlock;
2771
2772 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2773 if (data) {
2774 struct hci_cp_remote_oob_data_reply cp;
2775
2776 bacpy(&cp.bdaddr, &ev->bdaddr);
2777 memcpy(cp.hash, data->hash, sizeof(cp.hash));
2778 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2779
2780 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2781 &cp);
2782 } else {
2783 struct hci_cp_remote_oob_data_neg_reply cp;
2784
2785 bacpy(&cp.bdaddr, &ev->bdaddr);
2786 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2787 &cp);
2788 }
2789
2790 unlock:
2791 hci_dev_unlock(hdev);
2792 }
2793
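/* LE Connection Complete event: create or look up the LE connection
 * object, report success or failure to the management interface and
 * notify the upper protocol layers. */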
2794 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2795 {
2796 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2797 struct hci_conn *conn;
2798
2799 BT_DBG("%s status %d", hdev->name, ev->status);
2800
2801 hci_dev_lock(hdev);
2802
2803 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2804 if (!conn) {
2805 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2806 if (!conn) {
2807 BT_ERR("No memory for new connection");
2808 hci_dev_unlock(hdev);
2809 return;
2810 }
2811
2812 conn->dst_type = ev->bdaddr_type;
2813 }
2814
2815 if (ev->status) {
2816 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
2817 hci_proto_connect_cfm(conn, ev->status);
2818 conn->state = BT_CLOSED;
2819 hci_conn_del(conn);
2820 goto unlock;
2821 }
2822
2823 mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
2824
2825 conn->sec_level = BT_SECURITY_LOW;
2826 conn->handle = __le16_to_cpu(ev->handle);
2827 conn->state = BT_CONNECTED;
2828
2829 hci_conn_hold_device(conn);
2830 hci_conn_add_sysfs(conn);
2831
2832 hci_proto_connect_cfm(conn, ev->status);
2833
2834 unlock:
2835 hci_dev_unlock(hdev);
2836 }
2837
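/* LE Advertising Report event: walk the variable-length report entries
 * and add each advertiser to the advertising cache. */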
2838 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
2839 struct sk_buff *skb)
2840 {
2841 u8 num_reports = skb->data[0];
2842 void *ptr = &skb->data[1];
2843
2844 hci_dev_lock(hdev);
2845
2846 while (num_reports--) {
2847 struct hci_ev_le_advertising_info *ev = ptr;
2848
2849 hci_add_adv_entry(hdev, ev);
2850
2851 ptr += sizeof(*ev) + ev->length + 1;
2852 }
2853
2854 hci_dev_unlock(hdev);
2855 }
2856
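/* LE Long Term Key Request event: look up the LTK matching the
 * encrypted diversifier and random value and hand it back to the
 * controller, or send a negative reply if it is unknown. */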
2857 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
2858 struct sk_buff *skb)
2859 {
2860 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
2861 struct hci_cp_le_ltk_reply cp;
2862 struct hci_cp_le_ltk_neg_reply neg;
2863 struct hci_conn *conn;
2864 struct link_key *ltk;
2865
2866 	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
2867
2868 hci_dev_lock(hdev);
2869
2870 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2871 if (conn == NULL)
2872 goto not_found;
2873
2874 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
2875 if (ltk == NULL)
2876 goto not_found;
2877
2878 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
2879 cp.handle = cpu_to_le16(conn->handle);
2880 conn->pin_length = ltk->pin_len;
2881
2882 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
2883
2884 hci_dev_unlock(hdev);
2885
2886 return;
2887
2888 not_found:
2889 neg.handle = ev->handle;
2890 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
2891 hci_dev_unlock(hdev);
2892 }
2893
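/* LE Meta event: strip the meta header and dispatch on the subevent
 * code. */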
2894 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2895 {
2896 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2897
2898 skb_pull(skb, sizeof(*le_ev));
2899
2900 switch (le_ev->subevent) {
2901 case HCI_EV_LE_CONN_COMPLETE:
2902 hci_le_conn_complete_evt(hdev, skb);
2903 break;
2904
2905 case HCI_EV_LE_ADVERTISING_REPORT:
2906 hci_le_adv_report_evt(hdev, skb);
2907 break;
2908
2909 case HCI_EV_LE_LTK_REQ:
2910 hci_le_ltk_request_evt(hdev, skb);
2911 break;
2912
2913 default:
2914 break;
2915 }
2916 }
2917
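/* Main HCI event dispatcher: strip the event header, call the handler
 * for the event code, then update the receive statistics. */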
2918 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2919 {
2920 struct hci_event_hdr *hdr = (void *) skb->data;
2921 __u8 event = hdr->evt;
2922
2923 skb_pull(skb, HCI_EVENT_HDR_SIZE);
2924
2925 switch (event) {
2926 case HCI_EV_INQUIRY_COMPLETE:
2927 hci_inquiry_complete_evt(hdev, skb);
2928 break;
2929
2930 case HCI_EV_INQUIRY_RESULT:
2931 hci_inquiry_result_evt(hdev, skb);
2932 break;
2933
2934 case HCI_EV_CONN_COMPLETE:
2935 hci_conn_complete_evt(hdev, skb);
2936 break;
2937
2938 case HCI_EV_CONN_REQUEST:
2939 hci_conn_request_evt(hdev, skb);
2940 break;
2941
2942 case HCI_EV_DISCONN_COMPLETE:
2943 hci_disconn_complete_evt(hdev, skb);
2944 break;
2945
2946 case HCI_EV_AUTH_COMPLETE:
2947 hci_auth_complete_evt(hdev, skb);
2948 break;
2949
2950 case HCI_EV_REMOTE_NAME:
2951 hci_remote_name_evt(hdev, skb);
2952 break;
2953
2954 case HCI_EV_ENCRYPT_CHANGE:
2955 hci_encrypt_change_evt(hdev, skb);
2956 break;
2957
2958 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
2959 hci_change_link_key_complete_evt(hdev, skb);
2960 break;
2961
2962 case HCI_EV_REMOTE_FEATURES:
2963 hci_remote_features_evt(hdev, skb);
2964 break;
2965
2966 case HCI_EV_REMOTE_VERSION:
2967 hci_remote_version_evt(hdev, skb);
2968 break;
2969
2970 case HCI_EV_QOS_SETUP_COMPLETE:
2971 hci_qos_setup_complete_evt(hdev, skb);
2972 break;
2973
2974 case HCI_EV_CMD_COMPLETE:
2975 hci_cmd_complete_evt(hdev, skb);
2976 break;
2977
2978 case HCI_EV_CMD_STATUS:
2979 hci_cmd_status_evt(hdev, skb);
2980 break;
2981
2982 case HCI_EV_ROLE_CHANGE:
2983 hci_role_change_evt(hdev, skb);
2984 break;
2985
2986 case HCI_EV_NUM_COMP_PKTS:
2987 hci_num_comp_pkts_evt(hdev, skb);
2988 break;
2989
2990 case HCI_EV_MODE_CHANGE:
2991 hci_mode_change_evt(hdev, skb);
2992 break;
2993
2994 case HCI_EV_PIN_CODE_REQ:
2995 hci_pin_code_request_evt(hdev, skb);
2996 break;
2997
2998 case HCI_EV_LINK_KEY_REQ:
2999 hci_link_key_request_evt(hdev, skb);
3000 break;
3001
3002 case HCI_EV_LINK_KEY_NOTIFY:
3003 hci_link_key_notify_evt(hdev, skb);
3004 break;
3005
3006 case HCI_EV_CLOCK_OFFSET:
3007 hci_clock_offset_evt(hdev, skb);
3008 break;
3009
3010 case HCI_EV_PKT_TYPE_CHANGE:
3011 hci_pkt_type_change_evt(hdev, skb);
3012 break;
3013
3014 case HCI_EV_PSCAN_REP_MODE:
3015 hci_pscan_rep_mode_evt(hdev, skb);
3016 break;
3017
3018 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3019 hci_inquiry_result_with_rssi_evt(hdev, skb);
3020 break;
3021
3022 case HCI_EV_REMOTE_EXT_FEATURES:
3023 hci_remote_ext_features_evt(hdev, skb);
3024 break;
3025
3026 case HCI_EV_SYNC_CONN_COMPLETE:
3027 hci_sync_conn_complete_evt(hdev, skb);
3028 break;
3029
3030 case HCI_EV_SYNC_CONN_CHANGED:
3031 hci_sync_conn_changed_evt(hdev, skb);
3032 break;
3033
3034 case HCI_EV_SNIFF_SUBRATE:
3035 hci_sniff_subrate_evt(hdev, skb);
3036 break;
3037
3038 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3039 hci_extended_inquiry_result_evt(hdev, skb);
3040 break;
3041
3042 case HCI_EV_IO_CAPA_REQUEST:
3043 hci_io_capa_request_evt(hdev, skb);
3044 break;
3045
3046 case HCI_EV_IO_CAPA_REPLY:
3047 hci_io_capa_reply_evt(hdev, skb);
3048 break;
3049
3050 case HCI_EV_USER_CONFIRM_REQUEST:
3051 hci_user_confirm_request_evt(hdev, skb);
3052 break;
3053
3054 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3055 hci_simple_pair_complete_evt(hdev, skb);
3056 break;
3057
3058 case HCI_EV_REMOTE_HOST_FEATURES:
3059 hci_remote_host_features_evt(hdev, skb);
3060 break;
3061
3062 case HCI_EV_LE_META:
3063 hci_le_meta_evt(hdev, skb);
3064 break;
3065
3066 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3067 hci_remote_oob_data_request_evt(hdev, skb);
3068 break;
3069
3070 default:
3071 BT_DBG("%s event 0x%x", hdev->name, event);
3072 break;
3073 }
3074
3075 kfree_skb(skb);
3076 hdev->stat.evt_rx++;
3077 }
3078
3079 /* Generate internal stack event */
3080 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3081 {
3082 struct hci_event_hdr *hdr;
3083 struct hci_ev_stack_internal *ev;
3084 struct sk_buff *skb;
3085
3086 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3087 if (!skb)
3088 return;
3089
3090 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3091 hdr->evt = HCI_EV_STACK_INTERNAL;
3092 hdr->plen = sizeof(*ev) + dlen;
3093
3094 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3095 ev->type = type;
3096 memcpy(ev->data, data, dlen);
3097
3098 bt_cb(skb)->incoming = 1;
3099 __net_timestamp(skb);
3100
3101 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3102 skb->dev = (void *) hdev;
3103 hci_send_to_sock(hdev, skb, NULL);
3104 kfree_skb(skb);
3105 }
3106
3107 module_param(enable_le, bool, 0444);
3108 MODULE_PARM_DESC(enable_le, "Enable LE support");