/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

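/* Illustrative note (an addition, not from the original source): the
 * dut_mode_fops attribute above is registered in __hci_init() below and is
 * typically exercised from userspace through debugfs, e.g. by writing "Y"
 * or "N" to (assuming the usual debugfs mount point and per-device
 * directory) /sys/kernel/debug/bluetooth/hci0/dut_mode; the string is
 * parsed by strtobool() in dut_mode_write().
 */
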
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

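/* Clarifying note (added): hci_get_cmd_complete() takes ownership of
 * hdev->recv_evt. On success the returned skb already has the event header
 * (and, in the Command Complete case, the hci_ev_cmd_complete header)
 * pulled off, so the caller sees only the event parameters and is
 * responsible for freeing the skb with kfree_skb().
 */
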
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

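/* Usage sketch (added; it mirrors the dut_mode_write() caller above and
 * introduces no new API): a caller serialized by hci_req_lock() can issue
 * a command synchronously and convert the status byte of the returned
 * Command Complete parameters into an errno:
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */
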
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

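/* State machine summary (added for clarity): hdev->req_status is set to
 * HCI_REQ_PEND before the request runs, and the waiter is woken when
 * hci_req_sync_complete() moves it to HCI_REQ_DONE or hci_req_cancel()
 * moves it to HCI_REQ_CANCELED. If neither happens before the timeout,
 * the status is still HCI_REQ_PEND and the default case above maps that
 * to -ETIMEDOUT.
 */
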
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

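/* Clarifying note (added): the values returned above are the parameter of
 * the HCI Write Inquiry Mode command: 0x00 = standard Inquiry Result,
 * 0x01 = Inquiry Result with RSSI, 0x02 = Inquiry Result with RSSI or
 * Extended Inquiry Result. The explicit manufacturer/hci_rev/lmp_subver
 * checks appear to whitelist controllers that handle RSSI reporting
 * without advertising the corresponding LMP feature bit.
 */
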
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev)) {
		hci_debugfs_create_le(hdev);
		smp_register(hdev);
	}

	return 0;
}

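/* Recap (added for clarity): the staged bring-up above runs init1 (reset
 * plus basic identity reads), init2 (BR/EDR and/or LE setup commands),
 * init3 (event masks, link policy and LE feature dependent commands) and
 * init4 (page-2 event mask, codecs, MWS configuration, synchronization
 * train and Secure Connections). Each stage is a synchronous request, so
 * later stages can rely on the features and supported-commands bitmaps
 * read by earlier ones.
 */
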
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

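/* Discovery state flow (added for clarity): the usual progression is
 * STOPPED -> STARTING -> FINDING (optionally RESOLVING for remote name
 * lookups) -> STOPPING -> STOPPED. Note that mgmt_discovering() only
 * signals userspace on the FINDING and STOPPED edges, and a fall back
 * from STARTING to STOPPED stays silent because discovery never became
 * active in the first place.
 */
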
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

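/* Ordering note (added): the insertion loop above keeps the resolve list
 * sorted by ascending abs(rssi); since RSSI values are typically negative,
 * this means strongest signal first, so remote name resolution is
 * attempted for nearby devices before distant ones. Entries already in
 * NAME_PENDING state are skipped so an in-flight request keeps its
 * position.
 */
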
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

1714int hci_dev_close(__u16 dev)
1715{
1716 struct hci_dev *hdev;
1717 int err;
1718
70f23020
AE
1719 hdev = hci_dev_get(dev);
1720 if (!hdev)
1da177e4 1721 return -ENODEV;
8ee56540 1722
0736cfa8
MH
1723 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1724 err = -EBUSY;
1725 goto done;
1726 }
1727
8ee56540
MH
1728 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1729 cancel_delayed_work(&hdev->power_off);
1730
1da177e4 1731 err = hci_dev_do_close(hdev);
8ee56540 1732
0736cfa8 1733done:
1da177e4
LT
1734 hci_dev_put(hdev);
1735 return err;
1736}
1737
1738int hci_dev_reset(__u16 dev)
1739{
1740 struct hci_dev *hdev;
1741 int ret = 0;
1742
70f23020
AE
1743 hdev = hci_dev_get(dev);
1744 if (!hdev)
1da177e4
LT
1745 return -ENODEV;
1746
1747 hci_req_lock(hdev);
1da177e4 1748
808a049e
MH
1749 if (!test_bit(HCI_UP, &hdev->flags)) {
1750 ret = -ENETDOWN;
1da177e4 1751 goto done;
808a049e 1752 }
1da177e4 1753
0736cfa8
MH
1754 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1755 ret = -EBUSY;
1756 goto done;
1757 }
1758
4a964404 1759 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
1760 ret = -EOPNOTSUPP;
1761 goto done;
1762 }
1763
1da177e4
LT
1764 /* Drop queues */
1765 skb_queue_purge(&hdev->rx_q);
1766 skb_queue_purge(&hdev->cmd_q);
1767
76727c02
JH
1768 /* Avoid potential lockdep warnings from the *_flush() calls by
1769 * ensuring the workqueue is empty up front.
1770 */
1771 drain_workqueue(hdev->workqueue);
1772
09fd0de5 1773 hci_dev_lock(hdev);
1f9b9a5d 1774 hci_inquiry_cache_flush(hdev);
1da177e4 1775 hci_conn_hash_flush(hdev);
09fd0de5 1776 hci_dev_unlock(hdev);
1da177e4
LT
1777
1778 if (hdev->flush)
1779 hdev->flush(hdev);
1780
8e87d142 1781 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1782 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 1783
fee746b0 1784 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
1785
1786done:
1da177e4
LT
1787 hci_req_unlock(hdev);
1788 hci_dev_put(hdev);
1789 return ret;
1790}
1791
1792int hci_dev_reset_stat(__u16 dev)
1793{
1794 struct hci_dev *hdev;
1795 int ret = 0;
1796
70f23020
AE
1797 hdev = hci_dev_get(dev);
1798 if (!hdev)
1da177e4
LT
1799 return -ENODEV;
1800
0736cfa8
MH
1801 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1802 ret = -EBUSY;
1803 goto done;
1804 }
1805
4a964404 1806 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
1807 ret = -EOPNOTSUPP;
1808 goto done;
1809 }
1810
1da177e4
LT
1811 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1812
0736cfa8 1813done:
1da177e4 1814 hci_dev_put(hdev);
1da177e4
LT
1815 return ret;
1816}
1817
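/* Translate the scan mode set through the HCISETSCAN ioctl into the
 * corresponding mgmt flags: SCAN_PAGE maps to HCI_CONNECTABLE and
 * SCAN_INQUIRY to HCI_DISCOVERABLE. If either flag changed and the
 * mgmt interface is in use, a New Settings event is emitted.
 */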
123abc08
JH
1818static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1819{
bc6d2d04 1820 bool conn_changed, discov_changed;
123abc08
JH
1821
1822 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1823
1824 if ((scan & SCAN_PAGE))
1825 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1826 &hdev->dev_flags);
1827 else
1828 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1829 &hdev->dev_flags);
1830
bc6d2d04
JH
1831 if ((scan & SCAN_INQUIRY)) {
1832 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1833 &hdev->dev_flags);
1834 } else {
1835 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1836 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1837 &hdev->dev_flags);
1838 }
1839
123abc08
JH
1840 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1841 return;
1842
bc6d2d04
JH
1843 if (conn_changed || discov_changed) {
1844 /* In case this was disabled through mgmt */
1845 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1846
1847 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1848 mgmt_update_adv_data(hdev);
1849
123abc08 1850 mgmt_new_settings(hdev);
bc6d2d04 1851 }
123abc08
JH
1852}
1853
1da177e4
LT
1854int hci_dev_cmd(unsigned int cmd, void __user *arg)
1855{
1856 struct hci_dev *hdev;
1857 struct hci_dev_req dr;
1858 int err = 0;
1859
1860 if (copy_from_user(&dr, arg, sizeof(dr)))
1861 return -EFAULT;
1862
70f23020
AE
1863 hdev = hci_dev_get(dr.dev_id);
1864 if (!hdev)
1da177e4
LT
1865 return -ENODEV;
1866
0736cfa8
MH
1867 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1868 err = -EBUSY;
1869 goto done;
1870 }
1871
4a964404 1872 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
1873 err = -EOPNOTSUPP;
1874 goto done;
1875 }
1876
5b69bef5
MH
1877 if (hdev->dev_type != HCI_BREDR) {
1878 err = -EOPNOTSUPP;
1879 goto done;
1880 }
1881
56f87901
JH
1882 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1883 err = -EOPNOTSUPP;
1884 goto done;
1885 }
1886
1da177e4
LT
1887 switch (cmd) {
1888 case HCISETAUTH:
01178cd4
JH
1889 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1890 HCI_INIT_TIMEOUT);
1da177e4
LT
1891 break;
1892
1893 case HCISETENCRYPT:
1894 if (!lmp_encrypt_capable(hdev)) {
1895 err = -EOPNOTSUPP;
1896 break;
1897 }
1898
1899 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1900 /* Auth must be enabled first */
01178cd4
JH
1901 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1902 HCI_INIT_TIMEOUT);
1da177e4
LT
1903 if (err)
1904 break;
1905 }
1906
01178cd4
JH
1907 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1908 HCI_INIT_TIMEOUT);
1da177e4
LT
1909 break;
1910
1911 case HCISETSCAN:
01178cd4
JH
1912 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1913 HCI_INIT_TIMEOUT);
91a668b0 1914
bc6d2d04
JH
1915 /* Ensure that the connectable and discoverable states
1916 * get correctly modified as this was a non-mgmt change.
91a668b0 1917 */
123abc08
JH
1918 if (!err)
1919 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1920 break;
1921
1da177e4 1922 case HCISETLINKPOL:
01178cd4
JH
1923 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1924 HCI_INIT_TIMEOUT);
1da177e4
LT
1925 break;
1926
1927 case HCISETLINKMODE:
e4e8e37c
MH
1928 hdev->link_mode = ((__u16) dr.dev_opt) &
1929 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1930 break;
1931
1932 case HCISETPTYPE:
1933 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1934 break;
1935
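	/* For HCISETACLMTU and HCISETSCOMTU below, dev_opt is treated
	 * as two packed __u16 values: the first carries the packet
	 * count and the second the MTU.
	 */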
1936 case HCISETACLMTU:
e4e8e37c
MH
1937 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1938 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1939 break;
1940
1941 case HCISETSCOMTU:
e4e8e37c
MH
1942 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1943 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1944 break;
1945
1946 default:
1947 err = -EINVAL;
1948 break;
1949 }
e4e8e37c 1950
0736cfa8 1951done:
1da177e4
LT
1952 hci_dev_put(hdev);
1953 return err;
1954}
1955
1956int hci_get_dev_list(void __user *arg)
1957{
8035ded4 1958 struct hci_dev *hdev;
1da177e4
LT
1959 struct hci_dev_list_req *dl;
1960 struct hci_dev_req *dr;
1da177e4
LT
1961 int n = 0, size, err;
1962 __u16 dev_num;
1963
1964 if (get_user(dev_num, (__u16 __user *) arg))
1965 return -EFAULT;
1966
1967 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1968 return -EINVAL;
1969
1970 size = sizeof(*dl) + dev_num * sizeof(*dr);
1971
70f23020
AE
1972 dl = kzalloc(size, GFP_KERNEL);
1973 if (!dl)
1da177e4
LT
1974 return -ENOMEM;
1975
1976 dr = dl->dev_req;
1977
f20d09d5 1978 read_lock(&hci_dev_list_lock);
8035ded4 1979 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 1980 unsigned long flags = hdev->flags;
c542a06c 1981
2e84d8db
MH
1982 /* When auto-off is configured, the transport is actually
1983 * running, but the device should still be reported to
1984 * userspace as being down.
1985 */
1986 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1987 flags &= ~BIT(HCI_UP);
c542a06c 1988
1da177e4 1989 (dr + n)->dev_id = hdev->id;
2e84d8db 1990 (dr + n)->dev_opt = flags;
c542a06c 1991
1da177e4
LT
1992 if (++n >= dev_num)
1993 break;
1994 }
f20d09d5 1995 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1996
1997 dl->dev_num = n;
1998 size = sizeof(*dl) + n * sizeof(*dr);
1999
2000 err = copy_to_user(arg, dl, size);
2001 kfree(dl);
2002
2003 return err ? -EFAULT : 0;
2004}
2005
2006int hci_get_dev_info(void __user *arg)
2007{
2008 struct hci_dev *hdev;
2009 struct hci_dev_info di;
2e84d8db 2010 unsigned long flags;
1da177e4
LT
2011 int err = 0;
2012
2013 if (copy_from_user(&di, arg, sizeof(di)))
2014 return -EFAULT;
2015
70f23020
AE
2016 hdev = hci_dev_get(di.dev_id);
2017 if (!hdev)
1da177e4
LT
2018 return -ENODEV;
2019
2e84d8db
MH
2020 /* When auto-off is configured, the transport is actually
2021 * running, but the device should still be reported to
2022 * userspace as being down.
2023 */
2024 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2025 flags = hdev->flags & ~BIT(HCI_UP);
2026 else
2027 flags = hdev->flags;
c542a06c 2028
1da177e4
LT
2029 strcpy(di.name, hdev->name);
2030 di.bdaddr = hdev->bdaddr;
60f2a3ed 2031 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2032 di.flags = flags;
1da177e4 2033 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2034 if (lmp_bredr_capable(hdev)) {
2035 di.acl_mtu = hdev->acl_mtu;
2036 di.acl_pkts = hdev->acl_pkts;
2037 di.sco_mtu = hdev->sco_mtu;
2038 di.sco_pkts = hdev->sco_pkts;
2039 } else {
2040 di.acl_mtu = hdev->le_mtu;
2041 di.acl_pkts = hdev->le_pkts;
2042 di.sco_mtu = 0;
2043 di.sco_pkts = 0;
2044 }
1da177e4
LT
2045 di.link_policy = hdev->link_policy;
2046 di.link_mode = hdev->link_mode;
2047
2048 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2049 memcpy(&di.features, &hdev->features, sizeof(di.features));
2050
2051 if (copy_to_user(arg, &di, sizeof(di)))
2052 err = -EFAULT;
2053
2054 hci_dev_put(hdev);
2055
2056 return err;
2057}
2058
2059/* ---- Interface to HCI drivers ---- */
2060
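/* rfkill callback: when the switch blocks the radio, power the device
 * down unless it is still in setup or configuration; unblocking only
 * clears the flag and leaves powering up to a later user action.
 */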
611b30f7
MH
2061static int hci_rfkill_set_block(void *data, bool blocked)
2062{
2063 struct hci_dev *hdev = data;
2064
2065 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2066
0736cfa8
MH
2067 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2068 return -EBUSY;
2069
5e130367
JH
2070 if (blocked) {
2071 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
2072 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2073 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 2074 hci_dev_do_close(hdev);
5e130367
JH
2075 } else {
2076 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2077 }
611b30f7
MH
2078
2079 return 0;
2080}
2081
2082static const struct rfkill_ops hci_rfkill_ops = {
2083 .set_block = hci_rfkill_set_block,
2084};
2085
ab81cbf9
JH
2086static void hci_power_on(struct work_struct *work)
2087{
2088 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2089 int err;
ab81cbf9
JH
2090
2091 BT_DBG("%s", hdev->name);
2092
cbed0ca1 2093 err = hci_dev_do_open(hdev);
96570ffc 2094 if (err < 0) {
3ad67582 2095 hci_dev_lock(hdev);
96570ffc 2096 mgmt_set_powered_failed(hdev, err);
3ad67582 2097 hci_dev_unlock(hdev);
ab81cbf9 2098 return;
96570ffc 2099 }
ab81cbf9 2100
a5c8f270
MH
2101 /* During the HCI setup phase, a few error conditions are
2102 * ignored and they need to be checked now. If they are still
2103 * valid, it is important to turn the device back off.
2104 */
2105 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 2106 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
2107 (hdev->dev_type == HCI_BREDR &&
2108 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2109 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2110 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2111 hci_dev_do_close(hdev);
2112 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2113 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2114 HCI_AUTO_OFF_TIMEOUT);
bf543036 2115 }
ab81cbf9 2116
fee746b0 2117 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
2118 /* For unconfigured devices, set the HCI_RAW flag
2119 * so that userspace can easily identify them.
4a964404
MH
2120 */
2121 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2122 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2123
2124 /* For fully configured devices, this will send
2125 * the Index Added event. For unconfigured devices,
2126 * it will send the Unconfigured Index Added event.
2127 *
2128 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2129 * and no event will be sent.
2130 */
2131 mgmt_index_added(hdev);
d603b76b 2132 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
2133 /* Once the controller is configured, it is important
2134 * to clear the HCI_RAW flag.
2135 */
2136 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2137 clear_bit(HCI_RAW, &hdev->flags);
2138
d603b76b
MH
2139 /* Powering on the controller with HCI_CONFIG set only
2140 * happens with the transition from unconfigured to
2141 * configured. This will send the Index Added event.
2142 */
744cf19e 2143 mgmt_index_added(hdev);
fee746b0 2144 }
ab81cbf9
JH
2145}
2146
2147static void hci_power_off(struct work_struct *work)
2148{
3243553f 2149 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2150 power_off.work);
ab81cbf9
JH
2151
2152 BT_DBG("%s", hdev->name);
2153
8ee56540 2154 hci_dev_do_close(hdev);
ab81cbf9
JH
2155}
2156
16ab91ab
JH
2157static void hci_discov_off(struct work_struct *work)
2158{
2159 struct hci_dev *hdev;
16ab91ab
JH
2160
2161 hdev = container_of(work, struct hci_dev, discov_off.work);
2162
2163 BT_DBG("%s", hdev->name);
2164
d1967ff8 2165 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2166}
2167
35f7498a 2168void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2169{
4821002c 2170 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2171
4821002c
JH
2172 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2173 list_del(&uuid->list);
2aeb9a1a
JH
2174 kfree(uuid);
2175 }
2aeb9a1a
JH
2176}
2177
35f7498a 2178void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2179{
0378b597 2180 struct link_key *key;
55ed8ca1 2181
0378b597
JH
2182 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2183 list_del_rcu(&key->list);
2184 kfree_rcu(key, rcu);
55ed8ca1 2185 }
55ed8ca1
JH
2186}
2187
35f7498a 2188void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2189{
970d0f1b 2190 struct smp_ltk *k;
b899efaf 2191
970d0f1b
JH
2192 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2193 list_del_rcu(&k->list);
2194 kfree_rcu(k, rcu);
b899efaf 2195 }
b899efaf
VCG
2196}
2197
970c4e46
JH
2198void hci_smp_irks_clear(struct hci_dev *hdev)
2199{
adae20cb 2200 struct smp_irk *k;
970c4e46 2201
adae20cb
JH
2202 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2203 list_del_rcu(&k->list);
2204 kfree_rcu(k, rcu);
970c4e46
JH
2205 }
2206}
2207
55ed8ca1
JH
2208struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2209{
8035ded4 2210 struct link_key *k;
55ed8ca1 2211
0378b597
JH
2212 rcu_read_lock();
2213 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2214 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2215 rcu_read_unlock();
55ed8ca1 2216 return k;
0378b597
JH
2217 }
2218 }
2219 rcu_read_unlock();
55ed8ca1
JH
2220
2221 return NULL;
2222}
2223
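/* Decide whether a link key should be stored persistently. Legacy
 * keys (type < 0x03) are always kept, debug combination keys never
 * are, and for the remaining types the decision depends on the link
 * type and on the bonding requirements of both sides.
 */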
745c0ce3 2224static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2225 u8 key_type, u8 old_key_type)
d25e28ab
JH
2226{
2227 /* Legacy key */
2228 if (key_type < 0x03)
745c0ce3 2229 return true;
d25e28ab
JH
2230
2231 /* Debug keys are insecure so don't store them persistently */
2232 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2233 return false;
d25e28ab
JH
2234
2235 /* Changed combination key and there's no previous one */
2236 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2237 return false;
d25e28ab
JH
2238
2239 /* Security mode 3 case */
2240 if (!conn)
745c0ce3 2241 return true;
d25e28ab 2242
e3befab9
JH
2243 /* BR/EDR key derived using SC from an LE link */
2244 if (conn->type == LE_LINK)
2245 return true;
2246
d25e28ab
JH
2247 /* Neither the local nor the remote side had no-bonding as a requirement */
2248 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2249 return true;
d25e28ab
JH
2250
2251 /* Local side had dedicated bonding as requirement */
2252 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2253 return true;
d25e28ab
JH
2254
2255 /* Remote side had dedicated bonding as requirement */
2256 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2257 return true;
d25e28ab
JH
2258
2259 /* If none of the above criteria match, then don't store the key
2260 * persistently */
745c0ce3 2261 return false;
d25e28ab
JH
2262}
2263
e804d25d 2264static u8 ltk_role(u8 type)
98a0b845 2265{
e804d25d
JH
2266 if (type == SMP_LTK)
2267 return HCI_ROLE_MASTER;
98a0b845 2268
e804d25d 2269 return HCI_ROLE_SLAVE;
98a0b845
JH
2270}
2271
f3a73d97
JH
2272struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2273 u8 addr_type, u8 role)
75d262c2 2274{
c9839a11 2275 struct smp_ltk *k;
75d262c2 2276
970d0f1b
JH
2277 rcu_read_lock();
2278 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2279 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2280 continue;
2281
923e2414 2282 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2283 rcu_read_unlock();
75d262c2 2284 return k;
970d0f1b
JH
2285 }
2286 }
2287 rcu_read_unlock();
75d262c2
VCG
2288
2289 return NULL;
2290}
75d262c2 2291
970c4e46
JH
2292struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2293{
2294 struct smp_irk *irk;
2295
adae20cb
JH
2296 rcu_read_lock();
2297 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2298 if (!bacmp(&irk->rpa, rpa)) {
2299 rcu_read_unlock();
970c4e46 2300 return irk;
adae20cb 2301 }
970c4e46
JH
2302 }
2303
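	/* No stored RPA matched, so check whether any IRK resolves
	 * the given RPA and, if one does, cache the RPA in that entry.
	 */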
adae20cb 2304 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2305 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2306 bacpy(&irk->rpa, rpa);
adae20cb 2307 rcu_read_unlock();
970c4e46
JH
2308 return irk;
2309 }
2310 }
adae20cb 2311 rcu_read_unlock();
970c4e46
JH
2312
2313 return NULL;
2314}
2315
2316struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2317 u8 addr_type)
2318{
2319 struct smp_irk *irk;
2320
6cfc9988
JH
2321 /* Identity Address must be public or static random */
2322 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2323 return NULL;
2324
adae20cb
JH
2325 rcu_read_lock();
2326 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2327 if (addr_type == irk->addr_type &&
adae20cb
JH
2328 bacmp(bdaddr, &irk->bdaddr) == 0) {
2329 rcu_read_unlock();
970c4e46 2330 return irk;
adae20cb 2331 }
970c4e46 2332 }
adae20cb 2333 rcu_read_unlock();
970c4e46
JH
2334
2335 return NULL;
2336}
2337
567fa2aa 2338struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2339 bdaddr_t *bdaddr, u8 *val, u8 type,
2340 u8 pin_len, bool *persistent)
55ed8ca1
JH
2341{
2342 struct link_key *key, *old_key;
745c0ce3 2343 u8 old_key_type;
55ed8ca1
JH
2344
2345 old_key = hci_find_link_key(hdev, bdaddr);
2346 if (old_key) {
2347 old_key_type = old_key->type;
2348 key = old_key;
2349 } else {
12adcf3a 2350 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2351 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2352 if (!key)
567fa2aa 2353 return NULL;
0378b597 2354 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2355 }
2356
6ed93dc6 2357 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2358
d25e28ab
JH
2359 /* Some buggy controller combinations generate a changed
2360 * combination key for legacy pairing even when there's no
2361 * previous key */
2362 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2363 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2364 type = HCI_LK_COMBINATION;
655fe6ec
JH
2365 if (conn)
2366 conn->key_type = type;
2367 }
d25e28ab 2368
55ed8ca1 2369 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2370 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2371 key->pin_len = pin_len;
2372
b6020ba0 2373 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2374 key->type = old_key_type;
4748fed2
JH
2375 else
2376 key->type = type;
2377
7652ff6a
JH
2378 if (persistent)
2379 *persistent = hci_persistent_key(hdev, conn, type,
2380 old_key_type);
4df378a1 2381
567fa2aa 2382 return key;
55ed8ca1
JH
2383}
2384
ca9142b8 2385struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2386 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2387 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2388{
c9839a11 2389 struct smp_ltk *key, *old_key;
e804d25d 2390 u8 role = ltk_role(type);
75d262c2 2391
f3a73d97 2392 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2393 if (old_key)
75d262c2 2394 key = old_key;
c9839a11 2395 else {
0a14ab41 2396 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2397 if (!key)
ca9142b8 2398 return NULL;
970d0f1b 2399 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2400 }
2401
75d262c2 2402 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2403 key->bdaddr_type = addr_type;
2404 memcpy(key->val, tk, sizeof(key->val));
2405 key->authenticated = authenticated;
2406 key->ediv = ediv;
fe39c7b2 2407 key->rand = rand;
c9839a11
VCG
2408 key->enc_size = enc_size;
2409 key->type = type;
75d262c2 2410
ca9142b8 2411 return key;
75d262c2
VCG
2412}
2413
ca9142b8
JH
2414struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2415 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2416{
2417 struct smp_irk *irk;
2418
2419 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2420 if (!irk) {
2421 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2422 if (!irk)
ca9142b8 2423 return NULL;
970c4e46
JH
2424
2425 bacpy(&irk->bdaddr, bdaddr);
2426 irk->addr_type = addr_type;
2427
adae20cb 2428 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2429 }
2430
2431 memcpy(irk->val, val, 16);
2432 bacpy(&irk->rpa, rpa);
2433
ca9142b8 2434 return irk;
970c4e46
JH
2435}
2436
55ed8ca1
JH
2437int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2438{
2439 struct link_key *key;
2440
2441 key = hci_find_link_key(hdev, bdaddr);
2442 if (!key)
2443 return -ENOENT;
2444
6ed93dc6 2445 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2446
0378b597
JH
2447 list_del_rcu(&key->list);
2448 kfree_rcu(key, rcu);
55ed8ca1
JH
2449
2450 return 0;
2451}
2452
e0b2b27e 2453int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2454{
970d0f1b 2455 struct smp_ltk *k;
c51ffa0b 2456 int removed = 0;
b899efaf 2457
970d0f1b 2458 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2459 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2460 continue;
2461
6ed93dc6 2462 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2463
970d0f1b
JH
2464 list_del_rcu(&k->list);
2465 kfree_rcu(k, rcu);
c51ffa0b 2466 removed++;
b899efaf
VCG
2467 }
2468
c51ffa0b 2469 return removed ? 0 : -ENOENT;
b899efaf
VCG
2470}
2471
a7ec7338
JH
2472void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2473{
adae20cb 2474 struct smp_irk *k;
a7ec7338 2475
adae20cb 2476 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2477 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2478 continue;
2479
2480 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2481
adae20cb
JH
2482 list_del_rcu(&k->list);
2483 kfree_rcu(k, rcu);
a7ec7338
JH
2484 }
2485}
2486
6bd32326 2487/* HCI command timer function */
65cc2b49 2488static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2489{
65cc2b49
MH
2490 struct hci_dev *hdev = container_of(work, struct hci_dev,
2491 cmd_timer.work);
6bd32326 2492
bda4f23a
AE
2493 if (hdev->sent_cmd) {
2494 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2495 u16 opcode = __le16_to_cpu(sent->opcode);
2496
2497 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2498 } else {
2499 BT_ERR("%s command tx timeout", hdev->name);
2500 }
2501
6bd32326 2502 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2503 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2504}
2505
2763eda6 2506struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2507 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2508{
2509 struct oob_data *data;
2510
6928a924
JH
2511 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2512 if (bacmp(bdaddr, &data->bdaddr) != 0)
2513 continue;
2514 if (data->bdaddr_type != bdaddr_type)
2515 continue;
2516 return data;
2517 }
2763eda6
SJ
2518
2519 return NULL;
2520}
2521
6928a924
JH
2522int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2523 u8 bdaddr_type)
2763eda6
SJ
2524{
2525 struct oob_data *data;
2526
6928a924 2527 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2528 if (!data)
2529 return -ENOENT;
2530
6928a924 2531 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2532
2533 list_del(&data->list);
2534 kfree(data);
2535
2536 return 0;
2537}
2538
35f7498a 2539void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2540{
2541 struct oob_data *data, *n;
2542
2543 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2544 list_del(&data->list);
2545 kfree(data);
2546 }
2763eda6
SJ
2547}
2548
0798872e 2549int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2550 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2551 u8 *hash256, u8 *rand256)
2763eda6
SJ
2552{
2553 struct oob_data *data;
2554
6928a924 2555 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2556 if (!data) {
0a14ab41 2557 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2558 if (!data)
2559 return -ENOMEM;
2560
2561 bacpy(&data->bdaddr, bdaddr);
6928a924 2562 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2563 list_add(&data->list, &hdev->remote_oob_data);
2564 }
2565
81328d5c
JH
2566 if (hash192 && rand192) {
2567 memcpy(data->hash192, hash192, sizeof(data->hash192));
2568 memcpy(data->rand192, rand192, sizeof(data->rand192));
2569 } else {
2570 memset(data->hash192, 0, sizeof(data->hash192));
2571 memset(data->rand192, 0, sizeof(data->rand192));
0798872e
MH
2572 }
2573
81328d5c
JH
2574 if (hash256 && rand256) {
2575 memcpy(data->hash256, hash256, sizeof(data->hash256));
2576 memcpy(data->rand256, rand256, sizeof(data->rand256));
2577 } else {
2578 memset(data->hash256, 0, sizeof(data->hash256));
2579 memset(data->rand256, 0, sizeof(data->rand256));
2580 }
0798872e 2581
6ed93dc6 2582 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2583
2584 return 0;
2585}
2586
dcc36c16 2587struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2588 bdaddr_t *bdaddr, u8 type)
b2a66aad 2589{
8035ded4 2590 struct bdaddr_list *b;
b2a66aad 2591
dcc36c16 2592 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2593 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2594 return b;
b9ee0a78 2595 }
b2a66aad
AJ
2596
2597 return NULL;
2598}
2599
dcc36c16 2600void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
2601{
2602 struct list_head *p, *n;
2603
dcc36c16 2604 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 2605 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2606
2607 list_del(p);
2608 kfree(b);
2609 }
b2a66aad
AJ
2610}
2611
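/* Add an entry to a bdaddr list. Returns -EBADF for BDADDR_ANY,
 * -EEXIST for duplicates and -ENOMEM on allocation failure. A minimal
 * usage sketch, assuming the caller already holds hdev->lock:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
 *				  BDADDR_LE_PUBLIC);
 */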
dcc36c16 2612int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2613{
2614 struct bdaddr_list *entry;
b2a66aad 2615
b9ee0a78 2616 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2617 return -EBADF;
2618
dcc36c16 2619 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2620 return -EEXIST;
b2a66aad 2621
27f70f3e 2622 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2623 if (!entry)
2624 return -ENOMEM;
b2a66aad
AJ
2625
2626 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2627 entry->bdaddr_type = type;
b2a66aad 2628
dcc36c16 2629 list_add(&entry->list, list);
b2a66aad 2630
2a8357f2 2631 return 0;
b2a66aad
AJ
2632}
2633
dcc36c16 2634int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2635{
2636 struct bdaddr_list *entry;
b2a66aad 2637
35f7498a 2638 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2639 hci_bdaddr_list_clear(list);
35f7498a
JH
2640 return 0;
2641 }
b2a66aad 2642
dcc36c16 2643 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2644 if (!entry)
2645 return -ENOENT;
2646
2647 list_del(&entry->list);
2648 kfree(entry);
2649
2650 return 0;
2651}
2652
15819a70
AG
2653/* This function requires the caller holds hdev->lock */
2654struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2655 bdaddr_t *addr, u8 addr_type)
2656{
2657 struct hci_conn_params *params;
2658
738f6185
JH
2659 /* The conn params list only contains identity addresses */
2660 if (!hci_is_identity_address(addr, addr_type))
2661 return NULL;
2662
15819a70
AG
2663 list_for_each_entry(params, &hdev->le_conn_params, list) {
2664 if (bacmp(&params->addr, addr) == 0 &&
2665 params->addr_type == addr_type) {
2666 return params;
2667 }
2668 }
2669
2670 return NULL;
2671}
2672
4b10966f 2673/* This function requires the caller holds hdev->lock */
501f8827
JH
2674struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2675 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2676{
912b42ef 2677 struct hci_conn_params *param;
a9b0a04c 2678
738f6185
JH
2679 /* The list only contains identity addresses */
2680 if (!hci_is_identity_address(addr, addr_type))
2681 return NULL;
a9b0a04c 2682
501f8827 2683 list_for_each_entry(param, list, action) {
912b42ef
JH
2684 if (bacmp(&param->addr, addr) == 0 &&
2685 param->addr_type == addr_type)
2686 return param;
4b10966f
MH
2687 }
2688
2689 return NULL;
a9b0a04c
AG
2690}
2691
15819a70 2692/* This function requires the caller holds hdev->lock */
51d167c0
MH
2693struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2694 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2695{
2696 struct hci_conn_params *params;
2697
c46245b3 2698 if (!hci_is_identity_address(addr, addr_type))
51d167c0 2699 return NULL;
a9b0a04c 2700
15819a70 2701 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2702 if (params)
51d167c0 2703 return params;
15819a70
AG
2704
2705 params = kzalloc(sizeof(*params), GFP_KERNEL);
2706 if (!params) {
2707 BT_ERR("Out of memory");
51d167c0 2708 return NULL;
15819a70
AG
2709 }
2710
2711 bacpy(&params->addr, addr);
2712 params->addr_type = addr_type;
cef952ce
AG
2713
2714 list_add(&params->list, &hdev->le_conn_params);
93450c75 2715 INIT_LIST_HEAD(&params->action);
cef952ce 2716
bf5b3c8b
MH
2717 params->conn_min_interval = hdev->le_conn_min_interval;
2718 params->conn_max_interval = hdev->le_conn_max_interval;
2719 params->conn_latency = hdev->le_conn_latency;
2720 params->supervision_timeout = hdev->le_supv_timeout;
2721 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2722
2723 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2724
51d167c0 2725 return params;
bf5b3c8b
MH
2726}
2727
f6c63249 2728static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2729{
f8aaf9b6 2730 if (params->conn) {
f161dd41 2731 hci_conn_drop(params->conn);
f8aaf9b6
JH
2732 hci_conn_put(params->conn);
2733 }
f161dd41 2734
95305baa 2735 list_del(&params->action);
15819a70
AG
2736 list_del(&params->list);
2737 kfree(params);
f6c63249
JH
2738}
2739
2740/* This function requires the caller holds hdev->lock */
2741void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2742{
2743 struct hci_conn_params *params;
2744
2745 params = hci_conn_params_lookup(hdev, addr, addr_type);
2746 if (!params)
2747 return;
2748
2749 hci_conn_params_free(params);
15819a70 2750
95305baa
JH
2751 hci_update_background_scan(hdev);
2752
15819a70
AG
2753 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2754}
2755
2756/* This function requires the caller holds hdev->lock */
55af49a8 2757void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2758{
2759 struct hci_conn_params *params, *tmp;
2760
2761 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2762 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2763 continue;
15819a70
AG
2764 list_del(&params->list);
2765 kfree(params);
2766 }
2767
55af49a8 2768 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2769}
2770
2771/* This function requires the caller holds hdev->lock */
373110c5 2772void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2773{
15819a70 2774 struct hci_conn_params *params, *tmp;
77a77a30 2775
f6c63249
JH
2776 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2777 hci_conn_params_free(params);
77a77a30 2778
a4790dbd 2779 hci_update_background_scan(hdev);
77a77a30 2780
15819a70 2781 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2782}
2783
4c87eaab 2784static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2785{
4c87eaab
AG
2786 if (status) {
2787 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2788
4c87eaab
AG
2789 hci_dev_lock(hdev);
2790 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2791 hci_dev_unlock(hdev);
2792 return;
2793 }
7ba8b4be
AG
2794}
2795
4c87eaab 2796static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2797{
4c87eaab
AG
2798 /* General inquiry access code (GIAC) */
2799 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2800 struct hci_request req;
2801 struct hci_cp_inquiry cp;
7ba8b4be
AG
2802 int err;
2803
4c87eaab
AG
2804 if (status) {
2805 BT_ERR("Failed to disable LE scanning: status %d", status);
2806 return;
2807 }
7ba8b4be 2808
4c87eaab
AG
2809 switch (hdev->discovery.type) {
2810 case DISCOV_TYPE_LE:
2811 hci_dev_lock(hdev);
2812 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2813 hci_dev_unlock(hdev);
2814 break;
7ba8b4be 2815
4c87eaab
AG
2816 case DISCOV_TYPE_INTERLEAVED:
2817 hci_req_init(&req, hdev);
7ba8b4be 2818
4c87eaab
AG
2819 memset(&cp, 0, sizeof(cp));
2820 memcpy(&cp.lap, lap, sizeof(cp.lap));
2821 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2822 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 2823
4c87eaab 2824 hci_dev_lock(hdev);
7dbfac1d 2825
4c87eaab 2826 hci_inquiry_cache_flush(hdev);
7dbfac1d 2827
4c87eaab
AG
2828 err = hci_req_run(&req, inquiry_complete);
2829 if (err) {
2830 BT_ERR("Inquiry request failed: err %d", err);
2831 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2832 }
7dbfac1d 2833
4c87eaab
AG
2834 hci_dev_unlock(hdev);
2835 break;
7dbfac1d 2836 }
7dbfac1d
AG
2837}
2838
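/* Runs when the LE scan duration expires: build a request that sends
 * LE Set Scan Enable (disable) and let the completion handler above
 * either mark discovery as stopped or, for interleaved discovery,
 * follow up with a BR/EDR inquiry.
 */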
7ba8b4be
AG
2839static void le_scan_disable_work(struct work_struct *work)
2840{
2841 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2842 le_scan_disable.work);
4c87eaab
AG
2843 struct hci_request req;
2844 int err;
7ba8b4be
AG
2845
2846 BT_DBG("%s", hdev->name);
2847
4c87eaab 2848 hci_req_init(&req, hdev);
28b75a89 2849
b1efcc28 2850 hci_req_add_le_scan_disable(&req);
28b75a89 2851
4c87eaab
AG
2852 err = hci_req_run(&req, le_scan_disable_work_complete);
2853 if (err)
2854 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2855}
2856
a1f4c318
JH
2857/* Copy the Identity Address of the controller.
2858 *
2859 * If the controller has a public BD_ADDR, then by default use that one.
2860 * If this is a LE only controller without a public address, default to
2861 * the static random address.
2862 *
2863 * For debugging purposes it is possible to force controllers with a
2864 * public address to use the static random address instead.
50b5b952
MH
2865 *
2866 * In case BR/EDR has been disabled on a dual-mode controller and
2867 * userspace has configured a static address, then that address
2868 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
2869 */
2870void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2871 u8 *bdaddr_type)
2872{
111902f7 2873 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
50b5b952
MH
2874 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2875 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2876 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
2877 bacpy(bdaddr, &hdev->static_addr);
2878 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2879 } else {
2880 bacpy(bdaddr, &hdev->bdaddr);
2881 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2882 }
2883}
2884
9be0dab7
DH
2885/* Alloc HCI device */
2886struct hci_dev *hci_alloc_dev(void)
2887{
2888 struct hci_dev *hdev;
2889
27f70f3e 2890 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
2891 if (!hdev)
2892 return NULL;
2893
b1b813d4
DH
2894 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2895 hdev->esco_type = (ESCO_HV1);
2896 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
2897 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2898 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 2899 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
2900 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2901 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2902
b1b813d4
DH
2903 hdev->sniff_max_interval = 800;
2904 hdev->sniff_min_interval = 80;
2905
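	/* The LE defaults below are in controller units: advertising
	 * and scan values in 0.625 ms slots, connection intervals in
	 * 1.25 ms units, the supervision timeout in 10 ms units.
	 */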
3f959d46 2906 hdev->le_adv_channel_map = 0x07;
628531c9
GL
2907 hdev->le_adv_min_interval = 0x0800;
2908 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
2909 hdev->le_scan_interval = 0x0060;
2910 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
2911 hdev->le_conn_min_interval = 0x0028;
2912 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
2913 hdev->le_conn_latency = 0x0000;
2914 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
2915 hdev->le_def_tx_len = 0x001b;
2916 hdev->le_def_tx_time = 0x0148;
2917 hdev->le_max_tx_len = 0x001b;
2918 hdev->le_max_tx_time = 0x0148;
2919 hdev->le_max_rx_len = 0x001b;
2920 hdev->le_max_rx_time = 0x0148;
bef64738 2921
d6bfd59c 2922 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 2923 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
2924 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2925 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 2926
b1b813d4
DH
2927 mutex_init(&hdev->lock);
2928 mutex_init(&hdev->req_lock);
2929
2930 INIT_LIST_HEAD(&hdev->mgmt_pending);
2931 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 2932 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
2933 INIT_LIST_HEAD(&hdev->uuids);
2934 INIT_LIST_HEAD(&hdev->link_keys);
2935 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 2936 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 2937 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 2938 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 2939 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 2940 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 2941 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 2942 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
2943
2944 INIT_WORK(&hdev->rx_work, hci_rx_work);
2945 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2946 INIT_WORK(&hdev->tx_work, hci_tx_work);
2947 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 2948
b1b813d4
DH
2949 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2950 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2951 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2952
b1b813d4
DH
2953 skb_queue_head_init(&hdev->rx_q);
2954 skb_queue_head_init(&hdev->cmd_q);
2955 skb_queue_head_init(&hdev->raw_q);
2956
2957 init_waitqueue_head(&hdev->req_wait_q);
2958
65cc2b49 2959 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 2960
b1b813d4
DH
2961 hci_init_sysfs(hdev);
2962 discovery_init(hdev);
9be0dab7
DH
2963
2964 return hdev;
2965}
2966EXPORT_SYMBOL(hci_alloc_dev);
2967
2968/* Free HCI device */
2969void hci_free_dev(struct hci_dev *hdev)
2970{
9be0dab7
DH
2971 /* will free via device release */
2972 put_device(&hdev->dev);
2973}
2974EXPORT_SYMBOL(hci_free_dev);
2975
1da177e4
LT
2976/* Register HCI device */
2977int hci_register_dev(struct hci_dev *hdev)
2978{
b1b813d4 2979 int id, error;
1da177e4 2980
74292d5a 2981 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
2982 return -EINVAL;
2983
08add513
MM
2984 /* Do not allow HCI_AMP devices to register at index 0,
2985 * so the index can be used as the AMP controller ID.
2986 */
3df92b31
SL
2987 switch (hdev->dev_type) {
2988 case HCI_BREDR:
2989 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2990 break;
2991 case HCI_AMP:
2992 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2993 break;
2994 default:
2995 return -EINVAL;
1da177e4 2996 }
8e87d142 2997
3df92b31
SL
2998 if (id < 0)
2999 return id;
3000
1da177e4
LT
3001 sprintf(hdev->name, "hci%d", id);
3002 hdev->id = id;
2d8b3a11
AE
3003
3004 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3005
d8537548
KC
3006 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3007 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3008 if (!hdev->workqueue) {
3009 error = -ENOMEM;
3010 goto err;
3011 }
f48fd9c8 3012
d8537548
KC
3013 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3014 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3015 if (!hdev->req_workqueue) {
3016 destroy_workqueue(hdev->workqueue);
3017 error = -ENOMEM;
3018 goto err;
3019 }
3020
0153e2ec
MH
3021 if (!IS_ERR_OR_NULL(bt_debugfs))
3022 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3023
bdc3e0f1
MH
3024 dev_set_name(&hdev->dev, "%s", hdev->name);
3025
3026 error = device_add(&hdev->dev);
33ca954d 3027 if (error < 0)
54506918 3028 goto err_wqueue;
1da177e4 3029
611b30f7 3030 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3031 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3032 hdev);
611b30f7
MH
3033 if (hdev->rfkill) {
3034 if (rfkill_register(hdev->rfkill) < 0) {
3035 rfkill_destroy(hdev->rfkill);
3036 hdev->rfkill = NULL;
3037 }
3038 }
3039
5e130367
JH
3040 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3041 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3042
a8b2d5c2 3043 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3044 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3045
01cd3404 3046 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3047 /* Assume BR/EDR support until proven otherwise (such as
3048 * through reading supported features during init).
3049 */
3050 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3051 }
ce2be9ac 3052
fcee3377
GP
3053 write_lock(&hci_dev_list_lock);
3054 list_add(&hdev->list, &hci_dev_list);
3055 write_unlock(&hci_dev_list_lock);
3056
4a964404
MH
3057 /* Devices that are marked for raw-only usage are unconfigured
3058 * and should not be included in normal operation.
fee746b0
MH
3059 */
3060 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 3061 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 3062
1da177e4 3063 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3064 hci_dev_hold(hdev);
1da177e4 3065
19202573 3066 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3067
1da177e4 3068 return id;
f48fd9c8 3069
33ca954d
DH
3070err_wqueue:
3071 destroy_workqueue(hdev->workqueue);
6ead1bbc 3072 destroy_workqueue(hdev->req_workqueue);
33ca954d 3073err:
3df92b31 3074 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3075
33ca954d 3076 return error;
1da177e4
LT
3077}
3078EXPORT_SYMBOL(hci_register_dev);
3079
3080/* Unregister HCI device */
59735631 3081void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3082{
3df92b31 3083 int i, id;
ef222013 3084
c13854ce 3085 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3086
94324962
JH
3087 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3088
3df92b31
SL
3089 id = hdev->id;
3090
f20d09d5 3091 write_lock(&hci_dev_list_lock);
1da177e4 3092 list_del(&hdev->list);
f20d09d5 3093 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3094
3095 hci_dev_do_close(hdev);
3096
cd4c5391 3097 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3098 kfree_skb(hdev->reassembly[i]);
3099
b9b5ef18
GP
3100 cancel_work_sync(&hdev->power_on);
3101
ab81cbf9 3102 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
3103 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3104 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 3105 hci_dev_lock(hdev);
744cf19e 3106 mgmt_index_removed(hdev);
09fd0de5 3107 hci_dev_unlock(hdev);
56e5cb86 3108 }
ab81cbf9 3109
2e58ef3e
JH
3110 /* mgmt_index_removed should take care of emptying the
3111 * pending list */
3112 BUG_ON(!list_empty(&hdev->mgmt_pending));
3113
1da177e4
LT
3114 hci_notify(hdev, HCI_DEV_UNREG);
3115
611b30f7
MH
3116 if (hdev->rfkill) {
3117 rfkill_unregister(hdev->rfkill);
3118 rfkill_destroy(hdev->rfkill);
3119 }
3120
711eafe3 3121 smp_unregister(hdev);
99780a7b 3122
bdc3e0f1 3123 device_del(&hdev->dev);
147e2d59 3124
0153e2ec
MH
3125 debugfs_remove_recursive(hdev->debugfs);
3126
f48fd9c8 3127 destroy_workqueue(hdev->workqueue);
6ead1bbc 3128 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3129
09fd0de5 3130 hci_dev_lock(hdev);
dcc36c16 3131 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3132 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3133 hci_uuids_clear(hdev);
55ed8ca1 3134 hci_link_keys_clear(hdev);
b899efaf 3135 hci_smp_ltks_clear(hdev);
970c4e46 3136 hci_smp_irks_clear(hdev);
2763eda6 3137 hci_remote_oob_data_clear(hdev);
dcc36c16 3138 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 3139 hci_conn_params_clear_all(hdev);
22078800 3140 hci_discovery_filter_clear(hdev);
09fd0de5 3141 hci_dev_unlock(hdev);
e2e0cacb 3142
dc946bd8 3143 hci_dev_put(hdev);
3df92b31
SL
3144
3145 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3146}
3147EXPORT_SYMBOL(hci_unregister_dev);
3148
3149/* Suspend HCI device */
3150int hci_suspend_dev(struct hci_dev *hdev)
3151{
3152 hci_notify(hdev, HCI_DEV_SUSPEND);
3153 return 0;
3154}
3155EXPORT_SYMBOL(hci_suspend_dev);
3156
3157/* Resume HCI device */
3158int hci_resume_dev(struct hci_dev *hdev)
3159{
3160 hci_notify(hdev, HCI_DEV_RESUME);
3161 return 0;
3162}
3163EXPORT_SYMBOL(hci_resume_dev);
3164
75e0569f
MH
3165/* Reset HCI device */
3166int hci_reset_dev(struct hci_dev *hdev)
3167{
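	/* Synthesized HCI Hardware Error event: event code, parameter
	 * length (1 byte) and hardware code 0x00.
	 */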
3168 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3169 struct sk_buff *skb;
3170
3171 skb = bt_skb_alloc(3, GFP_ATOMIC);
3172 if (!skb)
3173 return -ENOMEM;
3174
3175 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3176 memcpy(skb_put(skb, 3), hw_err, 3);
3177
3178 /* Send Hardware Error to upper stack */
3179 return hci_recv_frame(hdev, skb);
3180}
3181EXPORT_SYMBOL(hci_reset_dev);
3182
76bca880 3183/* Receive frame from HCI drivers */
e1a26170 3184int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3185{
76bca880 3186 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3187 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3188 kfree_skb(skb);
3189 return -ENXIO;
3190 }
3191
d82603c6 3192 /* Incoming skb */
76bca880
MH
3193 bt_cb(skb)->incoming = 1;
3194
3195 /* Time stamp */
3196 __net_timestamp(skb);
3197
76bca880 3198 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3199 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3200
76bca880
MH
3201 return 0;
3202}
3203EXPORT_SYMBOL(hci_recv_frame);
3204
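/* Incrementally reassemble one HCI packet of the given type from a
 * driver-provided byte stream into hdev->reassembly[index]. Completed
 * frames are passed to hci_recv_frame(). Returns the number of input
 * bytes left unconsumed, or a negative error.
 */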
33e882a5 3205static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3206 int count, __u8 index)
33e882a5
SS
3207{
3208 int len = 0;
3209 int hlen = 0;
3210 int remain = count;
3211 struct sk_buff *skb;
3212 struct bt_skb_cb *scb;
3213
3214 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3215 index >= NUM_REASSEMBLY)
33e882a5
SS
3216 return -EILSEQ;
3217
3218 skb = hdev->reassembly[index];
3219
3220 if (!skb) {
3221 switch (type) {
3222 case HCI_ACLDATA_PKT:
3223 len = HCI_MAX_FRAME_SIZE;
3224 hlen = HCI_ACL_HDR_SIZE;
3225 break;
3226 case HCI_EVENT_PKT:
3227 len = HCI_MAX_EVENT_SIZE;
3228 hlen = HCI_EVENT_HDR_SIZE;
3229 break;
3230 case HCI_SCODATA_PKT:
3231 len = HCI_MAX_SCO_SIZE;
3232 hlen = HCI_SCO_HDR_SIZE;
3233 break;
3234 }
3235
1e429f38 3236 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3237 if (!skb)
3238 return -ENOMEM;
3239
3240 scb = (void *) skb->cb;
3241 scb->expect = hlen;
3242 scb->pkt_type = type;
3243
33e882a5
SS
3244 hdev->reassembly[index] = skb;
3245 }
3246
3247 while (count) {
3248 scb = (void *) skb->cb;
89bb46d0 3249 len = min_t(uint, scb->expect, count);
33e882a5
SS
3250
3251 memcpy(skb_put(skb, len), data, len);
3252
3253 count -= len;
3254 data += len;
3255 scb->expect -= len;
3256 remain = count;
3257
3258 switch (type) {
3259 case HCI_EVENT_PKT:
3260 if (skb->len == HCI_EVENT_HDR_SIZE) {
3261 struct hci_event_hdr *h = hci_event_hdr(skb);
3262 scb->expect = h->plen;
3263
3264 if (skb_tailroom(skb) < scb->expect) {
3265 kfree_skb(skb);
3266 hdev->reassembly[index] = NULL;
3267 return -ENOMEM;
3268 }
3269 }
3270 break;
3271
3272 case HCI_ACLDATA_PKT:
3273 if (skb->len == HCI_ACL_HDR_SIZE) {
3274 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3275 scb->expect = __le16_to_cpu(h->dlen);
3276
3277 if (skb_tailroom(skb) < scb->expect) {
3278 kfree_skb(skb);
3279 hdev->reassembly[index] = NULL;
3280 return -ENOMEM;
3281 }
3282 }
3283 break;
3284
3285 case HCI_SCODATA_PKT:
3286 if (skb->len == HCI_SCO_HDR_SIZE) {
3287 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3288 scb->expect = h->dlen;
3289
3290 if (skb_tailroom(skb) < scb->expect) {
3291 kfree_skb(skb);
3292 hdev->reassembly[index] = NULL;
3293 return -ENOMEM;
3294 }
3295 }
3296 break;
3297 }
3298
3299 if (scb->expect == 0) {
3300 /* Complete frame */
3301
3302 bt_cb(skb)->pkt_type = type;
e1a26170 3303 hci_recv_frame(hdev, skb);
33e882a5
SS
3304
3305 hdev->reassembly[index] = NULL;
3306 return remain;
3307 }
3308 }
3309
3310 return remain;
3311}
3312
99811510
SS
3313#define STREAM_REASSEMBLY 0
3314
3315int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3316{
3317 int type;
3318 int rem = 0;
3319
da5f6c37 3320 while (count) {
99811510
SS
3321 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3322
3323 if (!skb) {
3324 struct { char type; } *pkt;
3325
3326 /* Start of the frame */
3327 pkt = data;
3328 type = pkt->type;
3329
3330 data++;
3331 count--;
3332 } else
3333 type = bt_cb(skb)->pkt_type;
3334
1e429f38 3335 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 3336 STREAM_REASSEMBLY);
99811510
SS
3337 if (rem < 0)
3338 return rem;
3339
3340 data += (count - rem);
3341 count = rem;
f81c6224 3342 }
99811510
SS
3343
3344 return rem;
3345}
3346EXPORT_SYMBOL(hci_recv_stream_fragment);
3347
1da177e4
LT
3348/* ---- Interface to upper protocols ---- */
3349
1da177e4
LT
3350int hci_register_cb(struct hci_cb *cb)
3351{
3352 BT_DBG("%p name %s", cb, cb->name);
3353
f20d09d5 3354 write_lock(&hci_cb_list_lock);
1da177e4 3355 list_add(&cb->list, &hci_cb_list);
f20d09d5 3356 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3357
3358 return 0;
3359}
3360EXPORT_SYMBOL(hci_register_cb);
3361
3362int hci_unregister_cb(struct hci_cb *cb)
3363{
3364 BT_DBG("%p name %s", cb, cb->name);
3365
f20d09d5 3366 write_lock(&hci_cb_list_lock);
1da177e4 3367 list_del(&cb->list);
f20d09d5 3368 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3369
3370 return 0;
3371}
3372EXPORT_SYMBOL(hci_unregister_cb);
3373
51086991 3374static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3375{
cdc52faa
MH
3376 int err;
3377
0d48d939 3378 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3379
cd82e61c
MH
3380 /* Time stamp */
3381 __net_timestamp(skb);
1da177e4 3382
cd82e61c
MH
3383 /* Send copy to monitor */
3384 hci_send_to_monitor(hdev, skb);
3385
3386 if (atomic_read(&hdev->promisc)) {
3387 /* Send copy to the sockets */
470fe1b5 3388 hci_send_to_sock(hdev, skb);
1da177e4
LT
3389 }
3390
3391 /* Get rid of skb owner, prior to sending to the driver. */
3392 skb_orphan(skb);
3393
cdc52faa
MH
3394 err = hdev->send(hdev, skb);
3395 if (err < 0) {
3396 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3397 kfree_skb(skb);
3398 }
1da177e4
LT
3399}
3400
899de765
MH
3401bool hci_req_pending(struct hci_dev *hdev)
3402{
3403 return (hdev->req_status == HCI_REQ_PEND);
3404}
3405
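/* A minimal hci_send_cmd() usage sketch (opcode and parameter chosen
 * for illustration):
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */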
1ca3a9d0 3406/* Send HCI command */
07dc93dd
JH
3407int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3408 const void *param)
1ca3a9d0
JH
3409{
3410 struct sk_buff *skb;
3411
3412 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3413
3414 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3415 if (!skb) {
3416 BT_ERR("%s no memory for command", hdev->name);
3417 return -ENOMEM;
3418 }
3419
49c922bb 3420 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3421 * single-command requests.
3422 */
3423 bt_cb(skb)->req.start = true;
3424
1da177e4 3425 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3426 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3427
3428 return 0;
3429}
1da177e4
LT
3430
3431/* Get data from the previously sent command */
a9de9248 3432void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3433{
3434 struct hci_command_hdr *hdr;
3435
3436 if (!hdev->sent_cmd)
3437 return NULL;
3438
3439 hdr = (void *) hdev->sent_cmd->data;
3440
a9de9248 3441 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3442 return NULL;
3443
f0e09510 3444 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3445
3446 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3447}
3448
3449/* Send ACL data */
3450static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3451{
3452 struct hci_acl_hdr *hdr;
3453 int len = skb->len;
3454
badff6d0
ACM
3455 skb_push(skb, HCI_ACL_HDR_SIZE);
3456 skb_reset_transport_header(skb);
9c70220b 3457 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3458 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3459 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3460}
3461
ee22be7e 3462static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3463 struct sk_buff *skb, __u16 flags)
1da177e4 3464{
ee22be7e 3465 struct hci_conn *conn = chan->conn;
1da177e4
LT
3466 struct hci_dev *hdev = conn->hdev;
3467 struct sk_buff *list;
3468
087bfd99
GP
3469 skb->len = skb_headlen(skb);
3470 skb->data_len = 0;
3471
3472 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3473
3474 switch (hdev->dev_type) {
3475 case HCI_BREDR:
3476 hci_add_acl_hdr(skb, conn->handle, flags);
3477 break;
3478 case HCI_AMP:
3479 hci_add_acl_hdr(skb, chan->handle, flags);
3480 break;
3481 default:
3482 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3483 return;
3484 }
087bfd99 3485
70f23020
AE
3486 list = skb_shinfo(skb)->frag_list;
3487 if (!list) {
1da177e4
LT
3488 /* Non fragmented */
3489 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3490
73d80deb 3491 skb_queue_tail(queue, skb);
1da177e4
LT
3492 } else {
3493 /* Fragmented */
3494 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3495
3496 skb_shinfo(skb)->frag_list = NULL;
3497
9cfd5a23
JR
3498 /* Queue all fragments atomically. We need to use spin_lock_bh
3499 * here because with 6LoWPAN links this function can be called
3500 * from softirq context, where taking a normal spin lock could
3501 * cause deadlocks.
3502 */
3503 spin_lock_bh(&queue->lock);
1da177e4 3504
73d80deb 3505 __skb_queue_tail(queue, skb);
e702112f
AE
3506
3507 flags &= ~ACL_START;
3508 flags |= ACL_CONT;
1da177e4
LT
3509 do {
3510 skb = list; list = list->next;
8e87d142 3511
0d48d939 3512 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3513 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3514
3515 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3516
73d80deb 3517 __skb_queue_tail(queue, skb);
1da177e4
LT
3518 } while (list);
3519
9cfd5a23 3520 spin_unlock_bh(&queue->lock);
1da177e4 3521 }
73d80deb
LAD
3522}
3523
3524void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3525{
ee22be7e 3526 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3527
f0e09510 3528 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3529
ee22be7e 3530 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3531
3eff45ea 3532 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3533}
1da177e4
LT
3534
3535/* Send SCO data */
0d861d8b 3536void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3537{
3538 struct hci_dev *hdev = conn->hdev;
3539 struct hci_sco_hdr hdr;
3540
3541 BT_DBG("%s len %d", hdev->name, skb->len);
3542
aca3192c 3543 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3544 hdr.dlen = skb->len;
3545
badff6d0
ACM
3546 skb_push(skb, HCI_SCO_HDR_SIZE);
3547 skb_reset_transport_header(skb);
9c70220b 3548 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3549
0d48d939 3550 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3551
1da177e4 3552 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3553 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3554}
1da177e4
LT
3555
3556/* ---- HCI TX task (outgoing data) ---- */
3557
3558/* HCI Connection scheduler */
6039aa73
GP
3559static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3560 int *quote)
1da177e4
LT
3561{
3562 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3563 struct hci_conn *conn = NULL, *c;
abc5de8f 3564 unsigned int num = 0, min = ~0;
1da177e4 3565
8e87d142 3566 /* We don't have to lock device here. Connections are always
1da177e4 3567 * added and removed with TX task disabled. */
bf4c6325
GP
3568
3569 rcu_read_lock();
3570
3571 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3572 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3573 continue;
769be974
MH
3574
3575 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3576 continue;
3577
1da177e4
LT
3578 num++;
3579
3580 if (c->sent < min) {
3581 min = c->sent;
3582 conn = c;
3583 }
52087a79
LAD
3584
3585 if (hci_conn_num(hdev, type) == num)
3586 break;
1da177e4
LT
3587 }
3588
bf4c6325
GP
3589 rcu_read_unlock();
3590
1da177e4 3591 if (conn) {
6ed58ec5
VT
3592 int cnt, q;
3593
3594 switch (conn->type) {
3595 case ACL_LINK:
3596 cnt = hdev->acl_cnt;
3597 break;
3598 case SCO_LINK:
3599 case ESCO_LINK:
3600 cnt = hdev->sco_cnt;
3601 break;
3602 case LE_LINK:
3603 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3604 break;
3605 default:
3606 cnt = 0;
3607 BT_ERR("Unknown link type");
3608 }
3609
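		/* Split the available buffer credits for this link type
		 * evenly between the active connections, but always
		 * allow at least one packet per round.
		 */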
3610 q = cnt / num;
1da177e4
LT
3611 *quote = q ? q : 1;
3612 } else
3613 *quote = 0;
3614
3615 BT_DBG("conn %p quote %d", conn, *quote);
3616 return conn;
3617}
3618
6039aa73 3619static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3620{
3621 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3622 struct hci_conn *c;
1da177e4 3623
bae1f5d9 3624 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3625
bf4c6325
GP
3626 rcu_read_lock();
3627
1da177e4 3628 /* Kill stalled connections */
bf4c6325 3629 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3630 if (c->type == type && c->sent) {
6ed93dc6
AE
3631 BT_ERR("%s killing stalled connection %pMR",
3632 hdev->name, &c->dst);
bed71748 3633 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3634 }
3635 }
bf4c6325
GP
3636
3637 rcu_read_unlock();
1da177e4
LT
3638}
3639
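/* Pick the channel with queued data whose head packet has the highest
 * priority; among equals, prefer the connection with the fewest
 * packets in flight. The returned quote spreads the available buffer
 * credits across the contending connections.
 */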
6039aa73
GP
3640static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3641 int *quote)
1da177e4 3642{
73d80deb
LAD
3643 struct hci_conn_hash *h = &hdev->conn_hash;
3644 struct hci_chan *chan = NULL;
abc5de8f 3645 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3646 struct hci_conn *conn;
73d80deb
LAD
3647 int cnt, q, conn_num = 0;
3648
3649 BT_DBG("%s", hdev->name);
3650
bf4c6325
GP
3651 rcu_read_lock();
3652
3653 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3654 struct hci_chan *tmp;
3655
3656 if (conn->type != type)
3657 continue;
3658
3659 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3660 continue;
3661
3662 conn_num++;
3663
8192edef 3664 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3665 struct sk_buff *skb;
3666
3667 if (skb_queue_empty(&tmp->data_q))
3668 continue;
3669
3670 skb = skb_peek(&tmp->data_q);
3671 if (skb->priority < cur_prio)
3672 continue;
3673
3674 if (skb->priority > cur_prio) {
3675 num = 0;
3676 min = ~0;
3677 cur_prio = skb->priority;
3678 }
3679
3680 num++;
3681
3682 if (conn->sent < min) {
3683 min = conn->sent;
3684 chan = tmp;
3685 }
3686 }
3687
3688 if (hci_conn_num(hdev, type) == conn_num)
3689 break;
3690 }
3691
bf4c6325
GP
3692 rcu_read_unlock();
3693
73d80deb
LAD
3694 if (!chan)
3695 return NULL;
3696
3697 switch (chan->conn->type) {
3698 case ACL_LINK:
3699 cnt = hdev->acl_cnt;
3700 break;
bd1eb66b
AE
3701 case AMP_LINK:
3702 cnt = hdev->block_cnt;
3703 break;
73d80deb
LAD
3704 case SCO_LINK:
3705 case ESCO_LINK:
3706 cnt = hdev->sco_cnt;
3707 break;
3708 case LE_LINK:
3709 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3710 break;
3711 default:
3712 cnt = 0;
3713 BT_ERR("Unknown link type");
3714 }
3715
3716 q = cnt / num;
3717 *quote = q ? q : 1;
3718 BT_DBG("chan %p quote %d", chan, *quote);
3719 return chan;
3720}
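
/* Channel selection is two-level: the peek at each queue head tracks
 * the highest skb->priority seen so far (resetting num and min
 * whenever a higher priority appears), and among channels tied at
 * that priority the one on the least-sent connection wins. The quota
 * is then split across the num contenders exactly as in
 * hci_low_sent().
 */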

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
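
/* This is the anti-starvation half of the scheduler: any channel that
 * got no service in the last round (chan->sent == 0) has the skb at
 * its queue head promoted to HCI_PRIO_MAX - 1, while channels that
 * did send merely have their counters reset for the next round.
 */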

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
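
/* Example with illustrative numbers: for an skb of 700 bytes carrying
 * the 4 byte ACL header and hdev->block_len = 256, the payload is
 * 696 bytes and DIV_ROUND_UP(696, 256) = 3 blocks are consumed.
 */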

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
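
/* The 40.9 seconds above come from the largest link supervision
 * timeout the spec allows: 0xffff slots * 0.625 ms = 40.96 s.
 * HCI_ACL_TX_TIMEOUT must therefore exceed that; the LE scheduler
 * below uses an explicit HZ * 45 for the same reason.
 */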

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
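
/* In packet-based flow control every frame costs exactly one credit:
 * acl_cnt is decremented per skb sent and is replenished when the
 * controller reports the frames as completed (the Number Of Completed
 * Packets event, handled in hci_event.c).
 */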

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
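
/* Unlike the packet-based path, credits here are proportional to
 * frame size: a large skb may consume several blocks at once, and if
 * it needs more blocks than currently remain we bail out and try
 * again once the controller has freed some.
 */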

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
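
/* Controllers without a dedicated LE buffer pool report zero LE
 * packets (hdev->le_pkts == 0), in which case LE traffic draws on
 * the shared ACL credits; that is why cnt is written back to either
 * le_cnt or acl_cnt above.
 */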

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
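
/* The 16-bit handle field of the ACL header packs a 12-bit connection
 * handle together with the packet boundary and broadcast flags in its
 * top bits; hci_handle() and hci_flags() split them apart again.
 */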

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
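
/* bt_cb(skb)->req.start marks the first command of a queued request,
 * so seeing it at the head of cmd_q means every command of the
 * previous request has already been dequeued; an empty queue counts
 * as complete as well.
 */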

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
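
/* Example flow, with an assumed three-command request: if the second
 * command completes with a non-zero status, the early return is
 * skipped, the still-queued third command is dropped while flushing
 * cmd_q (stopping at the next request's start marker), and the
 * request's complete callback runs exactly once with that failure
 * status.
 */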

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
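
/* hdev->cmd_cnt is the controller's command-credit counter: sending a
 * command consumes the credit, and the event path restores it from
 * the Num_HCI_Command_Packets field of Command Complete/Status
 * events, so in practice commands go out one at a time. The clone
 * kept in sent_cmd lets the completion handler match the response
 * opcode and, if needed, resend the command.
 */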