/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
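/* A synchronous request moves through a small state machine: the status is
 * set to HCI_REQ_PEND before the commands are queued, and the completion
 * handler (or a cancel) flips it to HCI_REQ_DONE or HCI_REQ_CANCELED and
 * wakes any sleeper on req_wait_q. A status that is still HCI_REQ_PEND
 * after the wait therefore means the request timed out.
 */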
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
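/* Usage note: assuming debugfs is mounted at its conventional location
 * (an assumption, not something this file controls), the attribute above
 * appears as /sys/kernel/debug/bluetooth/hciN/dut_mode. Writing "Y" issues
 * HCI_OP_ENABLE_DUT_MODE, writing "N" resets the controller, e.g.:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */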
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
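/* Minimal usage sketch for the helpers above (the same pattern that
 * dut_mode_write() uses): send a command, wait for its Command Complete
 * event, and read the status from the first returned parameter byte.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */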
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
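/* Note the split above: __hci_req_sync() does the actual build-and-wait
 * and expects the caller to already hold req_lock, while hci_req_sync()
 * is the variant that rejects devices that are not HCI_UP and takes
 * req_lock itself. Callers that run during initialization, before HCI_UP
 * is set, use __hci_req_sync() directly.
 */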
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If the controller supports the Connection Parameters
		 * Request Link Layer Procedure, enable the corresponding
		 * event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
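/* Controller bring-up runs the request stages above in order:
 * hci_init1_req resets the controller and reads its basic information,
 * hci_init2_req and hci_init3_req configure event masks and transport
 * capabilities, and hci_init4_req applies the remaining optional
 * settings. AMP controllers stop after the first stage.
 */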
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
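/* The discovery cache keeps every entry on the "all" list; entries whose
 * remote name is still unknown are additionally linked on "unknown", and
 * entries with a pending name request are kept on "resolve", ordered by
 * signal strength (see hci_inquiry_cache_update_resolve() below).
 */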
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
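/* Sketch of the matching userspace call (hypothetical values): the
 * hci_inquiry_req header is followed in memory by the buffer that
 * receives the inquiry_info entries, exactly as this function reads
 * and writes them through the one user pointer.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;
 *	buf.ir.num_rsp = 255;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	buf.ir.lap[0]  = 0x33;	(general inquiry access code 0x9e8b33)
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *
 *	ioctl(dd, HCIINQUIRY, &buf);
 */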
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
123abc08
JH
1816static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1817{
bc6d2d04 1818 bool conn_changed, discov_changed;
123abc08
JH
1819
1820 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1821
1822 if ((scan & SCAN_PAGE))
1823 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1824 &hdev->dev_flags);
1825 else
1826 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1827 &hdev->dev_flags);
1828
bc6d2d04
JH
1829 if ((scan & SCAN_INQUIRY)) {
1830 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1831 &hdev->dev_flags);
1832 } else {
1833 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1834 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1835 &hdev->dev_flags);
1836 }
1837
123abc08
JH
1838 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1839 return;
1840
bc6d2d04
JH
1841 if (conn_changed || discov_changed) {
1842 /* In case this was disabled through mgmt */
1843 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1844
1845 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1846 mgmt_update_adv_data(hdev);
1847
123abc08 1848 mgmt_new_settings(hdev);
bc6d2d04 1849 }
123abc08
JH
1850}
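The scan argument here is the classic Scan_Enable bitmask; assuming the usual values from hci.h (SCAN_DISABLED 0x00, SCAN_INQUIRY 0x01, SCAN_PAGE 0x02), the function maps it as follows:

	/* scan value                resulting dev_flags
	 * SCAN_DISABLED             clears HCI_CONNECTABLE and HCI_DISCOVERABLE
	 * SCAN_PAGE                 sets HCI_CONNECTABLE, clears HCI_DISCOVERABLE
	 * SCAN_PAGE | SCAN_INQUIRY  sets HCI_CONNECTABLE and HCI_DISCOVERABLE
	 */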
1851
1da177e4
LT
1852int hci_dev_cmd(unsigned int cmd, void __user *arg)
1853{
1854 struct hci_dev *hdev;
1855 struct hci_dev_req dr;
1856 int err = 0;
1857
1858 if (copy_from_user(&dr, arg, sizeof(dr)))
1859 return -EFAULT;
1860
70f23020
AE
1861 hdev = hci_dev_get(dr.dev_id);
1862 if (!hdev)
1da177e4
LT
1863 return -ENODEV;
1864
0736cfa8
MH
1865 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1866 err = -EBUSY;
1867 goto done;
1868 }
1869
4a964404 1870 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
1871 err = -EOPNOTSUPP;
1872 goto done;
1873 }
1874
5b69bef5
MH
1875 if (hdev->dev_type != HCI_BREDR) {
1876 err = -EOPNOTSUPP;
1877 goto done;
1878 }
1879
56f87901
JH
1880 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1881 err = -EOPNOTSUPP;
1882 goto done;
1883 }
1884
1da177e4
LT
1885 switch (cmd) {
1886 case HCISETAUTH:
01178cd4
JH
1887 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1888 HCI_INIT_TIMEOUT);
1da177e4
LT
1889 break;
1890
1891 case HCISETENCRYPT:
1892 if (!lmp_encrypt_capable(hdev)) {
1893 err = -EOPNOTSUPP;
1894 break;
1895 }
1896
1897 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1898 /* Auth must be enabled first */
01178cd4
JH
1899 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1900 HCI_INIT_TIMEOUT);
1da177e4
LT
1901 if (err)
1902 break;
1903 }
1904
01178cd4
JH
1905 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1906 HCI_INIT_TIMEOUT);
1da177e4
LT
1907 break;
1908
1909 case HCISETSCAN:
01178cd4
JH
1910 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1911 HCI_INIT_TIMEOUT);
91a668b0 1912
bc6d2d04
JH
1913 /* Ensure that the connectable and discoverable states
1914 * get correctly modified as this was a non-mgmt change.
91a668b0 1915 */
123abc08
JH
1916 if (!err)
1917 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1918 break;
1919
1da177e4 1920 case HCISETLINKPOL:
01178cd4
JH
1921 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1922 HCI_INIT_TIMEOUT);
1da177e4
LT
1923 break;
1924
1925 case HCISETLINKMODE:
e4e8e37c
MH
1926 hdev->link_mode = ((__u16) dr.dev_opt) &
1927 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1928 break;
1929
1930 case HCISETPTYPE:
1931 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1932 break;
1933
1934 case HCISETACLMTU:
e4e8e37c
MH
1935 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1936 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1937 break;
1938
1939 case HCISETSCOMTU:
e4e8e37c
MH
1940 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1941 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1942 break;
1943
1944 default:
1945 err = -EINVAL;
1946 break;
1947 }
e4e8e37c 1948
0736cfa8 1949done:
1da177e4
LT
1950 hci_dev_put(hdev);
1951 return err;
1952}
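A hypothetical userspace caller of this ioctl path, making a controller both connectable and discoverable (dd is a raw HCI socket as in the earlier sketch):

	struct hci_dev_req dr = {
		.dev_id  = 0,				/* hci0 */
		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,	/* connectable + discoverable */
	};

	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
		perror("HCISETSCAN");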
1953
1954int hci_get_dev_list(void __user *arg)
1955{
8035ded4 1956 struct hci_dev *hdev;
1da177e4
LT
1957 struct hci_dev_list_req *dl;
1958 struct hci_dev_req *dr;
1da177e4
LT
1959 int n = 0, size, err;
1960 __u16 dev_num;
1961
1962 if (get_user(dev_num, (__u16 __user *) arg))
1963 return -EFAULT;
1964
1965 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1966 return -EINVAL;
1967
1968 size = sizeof(*dl) + dev_num * sizeof(*dr);
1969
70f23020
AE
1970 dl = kzalloc(size, GFP_KERNEL);
1971 if (!dl)
1da177e4
LT
1972 return -ENOMEM;
1973
1974 dr = dl->dev_req;
1975
f20d09d5 1976 read_lock(&hci_dev_list_lock);
8035ded4 1977 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 1978 unsigned long flags = hdev->flags;
c542a06c 1979
2e84d8db
MH
1980 /* When auto-off is configured, the transport is still
1981 * running, but in that case we report the device as
1982 * being down.
1983 */
1984 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1985 flags &= ~BIT(HCI_UP);
c542a06c 1986
1da177e4 1987 (dr + n)->dev_id = hdev->id;
2e84d8db 1988 (dr + n)->dev_opt = flags;
c542a06c 1989
1da177e4
LT
1990 if (++n >= dev_num)
1991 break;
1992 }
f20d09d5 1993 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1994
1995 dl->dev_num = n;
1996 size = sizeof(*dl) + n * sizeof(*dr);
1997
1998 err = copy_to_user(arg, dl, size);
1999 kfree(dl);
2000
2001 return err ? -EFAULT : 0;
2002}
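From userspace the same structure is consumed like this (a sketch, additionally assuming <stdio.h> and <stdlib.h>; HCI_MAX_DEV is the usual upper bound of 16):

	/* Sketch: enumerate controllers via HCIGETDEVLIST */
	static int list_devices(int dd)
	{
		struct hci_dev_list_req *dl;
		struct hci_dev_req *dr;
		int i;

		dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
		if (!dl)
			return -1;

		dl->dev_num = HCI_MAX_DEV;
		dr = dl->dev_req;

		if (ioctl(dd, HCIGETDEVLIST, (void *) dl) == 0)
			for (i = 0; i < dl->dev_num; i++)
				printf("hci%u flags 0x%x\n",
				       dr[i].dev_id, dr[i].dev_opt);

		free(dl);
		return 0;
	}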
2003
2004int hci_get_dev_info(void __user *arg)
2005{
2006 struct hci_dev *hdev;
2007 struct hci_dev_info di;
2e84d8db 2008 unsigned long flags;
1da177e4
LT
2009 int err = 0;
2010
2011 if (copy_from_user(&di, arg, sizeof(di)))
2012 return -EFAULT;
2013
70f23020
AE
2014 hdev = hci_dev_get(di.dev_id);
2015 if (!hdev)
1da177e4
LT
2016 return -ENODEV;
2017
2e84d8db
MH
2018 /* When auto-off is configured, the transport is still
2019 * running, but in that case we report the device as
2020 * being down.
2021 */
2022 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2023 flags = hdev->flags & ~BIT(HCI_UP);
2024 else
2025 flags = hdev->flags;
c542a06c 2026
1da177e4
LT
2027 strcpy(di.name, hdev->name);
2028 di.bdaddr = hdev->bdaddr;
60f2a3ed 2029 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2030 di.flags = flags;
1da177e4 2031 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2032 if (lmp_bredr_capable(hdev)) {
2033 di.acl_mtu = hdev->acl_mtu;
2034 di.acl_pkts = hdev->acl_pkts;
2035 di.sco_mtu = hdev->sco_mtu;
2036 di.sco_pkts = hdev->sco_pkts;
2037 } else {
2038 di.acl_mtu = hdev->le_mtu;
2039 di.acl_pkts = hdev->le_pkts;
2040 di.sco_mtu = 0;
2041 di.sco_pkts = 0;
2042 }
1da177e4
LT
2043 di.link_policy = hdev->link_policy;
2044 di.link_mode = hdev->link_mode;
2045
2046 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2047 memcpy(&di.features, &hdev->features, sizeof(di.features));
2048
2049 if (copy_to_user(arg, &di, sizeof(di)))
2050 err = -EFAULT;
2051
2052 hci_dev_put(hdev);
2053
2054 return err;
2055}
2056
2057/* ---- Interface to HCI drivers ---- */
2058
611b30f7
MH
2059static int hci_rfkill_set_block(void *data, bool blocked)
2060{
2061 struct hci_dev *hdev = data;
2062
2063 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2064
0736cfa8
MH
2065 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2066 return -EBUSY;
2067
5e130367
JH
2068 if (blocked) {
2069 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
2070 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2071 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 2072 hci_dev_do_close(hdev);
5e130367
JH
2073 } else {
2074 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2075 }
611b30f7
MH
2076
2077 return 0;
2078}
2079
2080static const struct rfkill_ops hci_rfkill_ops = {
2081 .set_block = hci_rfkill_set_block,
2082};
2083
ab81cbf9
JH
2084static void hci_power_on(struct work_struct *work)
2085{
2086 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2087 int err;
ab81cbf9
JH
2088
2089 BT_DBG("%s", hdev->name);
2090
cbed0ca1 2091 err = hci_dev_do_open(hdev);
96570ffc 2092 if (err < 0) {
3ad67582 2093 hci_dev_lock(hdev);
96570ffc 2094 mgmt_set_powered_failed(hdev, err);
3ad67582 2095 hci_dev_unlock(hdev);
ab81cbf9 2096 return;
96570ffc 2097 }
ab81cbf9 2098
a5c8f270
MH
2099 /* During the HCI setup phase, a few error conditions are
2100 * ignored and they need to be checked now. If they are still
2101 * valid, it is important to turn the device back off.
2102 */
2103 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 2104 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
2105 (hdev->dev_type == HCI_BREDR &&
2106 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2107 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2108 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2109 hci_dev_do_close(hdev);
2110 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2111 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2112 HCI_AUTO_OFF_TIMEOUT);
bf543036 2113 }
ab81cbf9 2114
fee746b0 2115 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
2116 /* For unconfigured devices, set the HCI_RAW flag
2117 * so that userspace can easily identify them.
4a964404
MH
2118 */
2119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2120 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2121
2122 /* For fully configured devices, this will send
2123 * the Index Added event. For unconfigured devices,
2124 * it will send an Unconfigured Index Added event.
2125 *
2126 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2127 * and no event will be sent.
2128 */
2129 mgmt_index_added(hdev);
d603b76b 2130 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
2131 /* Once the controller is configured, it is
2132 * important to clear the HCI_RAW flag.
2133 */
2134 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2135 clear_bit(HCI_RAW, &hdev->flags);
2136
d603b76b
MH
2137 /* Powering on the controller with HCI_CONFIG set only
2138 * happens with the transition from unconfigured to
2139 * configured. This will send the Index Added event.
2140 */
744cf19e 2141 mgmt_index_added(hdev);
fee746b0 2142 }
ab81cbf9
JH
2143}
2144
2145static void hci_power_off(struct work_struct *work)
2146{
3243553f 2147 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2148 power_off.work);
ab81cbf9
JH
2149
2150 BT_DBG("%s", hdev->name);
2151
8ee56540 2152 hci_dev_do_close(hdev);
ab81cbf9
JH
2153}
2154
c7741d16
MH
2155static void hci_error_reset(struct work_struct *work)
2156{
2157 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2158
2159 BT_DBG("%s", hdev->name);
2160
2161 if (hdev->hw_error)
2162 hdev->hw_error(hdev, hdev->hw_error_code);
2163 else
2164 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2165 hdev->hw_error_code);
2166
2167 if (hci_dev_do_close(hdev))
2168 return;
2169
c7741d16
MH
2170 hci_dev_do_open(hdev);
2171}
2172
16ab91ab
JH
2173static void hci_discov_off(struct work_struct *work)
2174{
2175 struct hci_dev *hdev;
16ab91ab
JH
2176
2177 hdev = container_of(work, struct hci_dev, discov_off.work);
2178
2179 BT_DBG("%s", hdev->name);
2180
d1967ff8 2181 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2182}
2183
35f7498a 2184void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2185{
4821002c 2186 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2187
4821002c
JH
2188 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2189 list_del(&uuid->list);
2aeb9a1a
JH
2190 kfree(uuid);
2191 }
2aeb9a1a
JH
2192}
2193
35f7498a 2194void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2195{
0378b597 2196 struct link_key *key;
55ed8ca1 2197
0378b597
JH
2198 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2199 list_del_rcu(&key->list);
2200 kfree_rcu(key, rcu);
55ed8ca1 2201 }
55ed8ca1
JH
2202}
2203
35f7498a 2204void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2205{
970d0f1b 2206 struct smp_ltk *k;
b899efaf 2207
970d0f1b
JH
2208 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2209 list_del_rcu(&k->list);
2210 kfree_rcu(k, rcu);
b899efaf 2211 }
b899efaf
VCG
2212}
2213
970c4e46
JH
2214void hci_smp_irks_clear(struct hci_dev *hdev)
2215{
adae20cb 2216 struct smp_irk *k;
970c4e46 2217
adae20cb
JH
2218 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2219 list_del_rcu(&k->list);
2220 kfree_rcu(k, rcu);
970c4e46
JH
2221 }
2222}
2223
55ed8ca1
JH
2224struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2225{
8035ded4 2226 struct link_key *k;
55ed8ca1 2227
0378b597
JH
2228 rcu_read_lock();
2229 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2230 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2231 rcu_read_unlock();
55ed8ca1 2232 return k;
0378b597
JH
2233 }
2234 }
2235 rcu_read_unlock();
55ed8ca1
JH
2236
2237 return NULL;
2238}
2239
745c0ce3 2240static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2241 u8 key_type, u8 old_key_type)
d25e28ab
JH
2242{
2243 /* Legacy key */
2244 if (key_type < 0x03)
745c0ce3 2245 return true;
d25e28ab
JH
2246
2247 /* Debug keys are insecure so don't store them persistently */
2248 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2249 return false;
d25e28ab
JH
2250
2251 /* Changed combination key and there's no previous one */
2252 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2253 return false;
d25e28ab
JH
2254
2255 /* Security mode 3 case */
2256 if (!conn)
745c0ce3 2257 return true;
d25e28ab 2258
e3befab9
JH
2259 /* BR/EDR key derived using SC from an LE link */
2260 if (conn->type == LE_LINK)
2261 return true;
2262
d25e28ab
JH
2263 /* Neither the local nor the remote side requested no-bonding */
2264 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2265 return true;
d25e28ab
JH
2266
2267 /* Local side had dedicated bonding as requirement */
2268 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2269 return true;
d25e28ab
JH
2270
2271 /* Remote side had dedicated bonding as requirement */
2272 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2273 return true;
d25e28ab
JH
2274
2275 /* If none of the above criteria match, then don't store the key
2276 * persistently */
745c0ce3 2277 return false;
d25e28ab
JH
2278}
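As a worked example of the checks above, assuming a Secure Simple Pairing combination key (key_type >= 0x04) and the spec's authentication-requirement values (0x00/0x01 no bonding, 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding):

	/* local auth_type  remote_auth  stored persistently?
	 * 0x01             0x01         no  (both sides asked for no-bonding)
	 * 0x04             0x01         no  (remote asked for no-bonding and
	 *                                    local was not dedicated bonding)
	 * 0x04             0x04         yes (neither side asked for no-bonding)
	 * 0x02             0x00         yes (local dedicated bonding)
	 * any              any          never for HCI_LK_DEBUG_COMBINATION;
	 *                               always for legacy keys (< 0x03)
	 */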
2279
e804d25d 2280static u8 ltk_role(u8 type)
98a0b845 2281{
e804d25d
JH
2282 if (type == SMP_LTK)
2283 return HCI_ROLE_MASTER;
98a0b845 2284
e804d25d 2285 return HCI_ROLE_SLAVE;
98a0b845
JH
2286}
2287
f3a73d97
JH
2288struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2289 u8 addr_type, u8 role)
75d262c2 2290{
c9839a11 2291 struct smp_ltk *k;
75d262c2 2292
970d0f1b
JH
2293 rcu_read_lock();
2294 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2295 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2296 continue;
2297
923e2414 2298 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2299 rcu_read_unlock();
75d262c2 2300 return k;
970d0f1b
JH
2301 }
2302 }
2303 rcu_read_unlock();
75d262c2
VCG
2304
2305 return NULL;
2306}
75d262c2 2307
970c4e46
JH
2308struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2309{
2310 struct smp_irk *irk;
2311
adae20cb
JH
2312 rcu_read_lock();
2313 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2314 if (!bacmp(&irk->rpa, rpa)) {
2315 rcu_read_unlock();
970c4e46 2316 return irk;
adae20cb 2317 }
970c4e46
JH
2318 }
2319
adae20cb 2320 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2321 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2322 bacpy(&irk->rpa, rpa);
adae20cb 2323 rcu_read_unlock();
970c4e46
JH
2324 return irk;
2325 }
2326 }
adae20cb 2327 rcu_read_unlock();
970c4e46
JH
2328
2329 return NULL;
2330}
2331
2332struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2333 u8 addr_type)
2334{
2335 struct smp_irk *irk;
2336
6cfc9988
JH
2337 /* Identity Address must be public or static random */
2338 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2339 return NULL;
2340
adae20cb
JH
2341 rcu_read_lock();
2342 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2343 if (addr_type == irk->addr_type &&
adae20cb
JH
2344 bacmp(bdaddr, &irk->bdaddr) == 0) {
2345 rcu_read_unlock();
970c4e46 2346 return irk;
adae20cb 2347 }
970c4e46 2348 }
adae20cb 2349 rcu_read_unlock();
970c4e46
JH
2350
2351 return NULL;
2352}
2353
567fa2aa 2354struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2355 bdaddr_t *bdaddr, u8 *val, u8 type,
2356 u8 pin_len, bool *persistent)
55ed8ca1
JH
2357{
2358 struct link_key *key, *old_key;
745c0ce3 2359 u8 old_key_type;
55ed8ca1
JH
2360
2361 old_key = hci_find_link_key(hdev, bdaddr);
2362 if (old_key) {
2363 old_key_type = old_key->type;
2364 key = old_key;
2365 } else {
12adcf3a 2366 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2367 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2368 if (!key)
567fa2aa 2369 return NULL;
0378b597 2370 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2371 }
2372
6ed93dc6 2373 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2374
d25e28ab
JH
2375 /* Some buggy controller combinations generate a changed
2376 * combination key for legacy pairing even when there's no
2377 * previous key */
2378 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2379 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2380 type = HCI_LK_COMBINATION;
655fe6ec
JH
2381 if (conn)
2382 conn->key_type = type;
2383 }
d25e28ab 2384
55ed8ca1 2385 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2386 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2387 key->pin_len = pin_len;
2388
b6020ba0 2389 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2390 key->type = old_key_type;
4748fed2
JH
2391 else
2392 key->type = type;
2393
7652ff6a
JH
2394 if (persistent)
2395 *persistent = hci_persistent_key(hdev, conn, type,
2396 old_key_type);
4df378a1 2397
567fa2aa 2398 return key;
55ed8ca1
JH
2399}
2400
ca9142b8 2401struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2402 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2403 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2404{
c9839a11 2405 struct smp_ltk *key, *old_key;
e804d25d 2406 u8 role = ltk_role(type);
75d262c2 2407
f3a73d97 2408 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2409 if (old_key)
75d262c2 2410 key = old_key;
c9839a11 2411 else {
0a14ab41 2412 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2413 if (!key)
ca9142b8 2414 return NULL;
970d0f1b 2415 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2416 }
2417
75d262c2 2418 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2419 key->bdaddr_type = addr_type;
2420 memcpy(key->val, tk, sizeof(key->val));
2421 key->authenticated = authenticated;
2422 key->ediv = ediv;
fe39c7b2 2423 key->rand = rand;
c9839a11
VCG
2424 key->enc_size = enc_size;
2425 key->type = type;
75d262c2 2426
ca9142b8 2427 return key;
75d262c2
VCG
2428}
2429
ca9142b8
JH
2430struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2431 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2432{
2433 struct smp_irk *irk;
2434
2435 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2436 if (!irk) {
2437 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2438 if (!irk)
ca9142b8 2439 return NULL;
970c4e46
JH
2440
2441 bacpy(&irk->bdaddr, bdaddr);
2442 irk->addr_type = addr_type;
2443
adae20cb 2444 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2445 }
2446
2447 memcpy(irk->val, val, 16);
2448 bacpy(&irk->rpa, rpa);
2449
ca9142b8 2450 return irk;
970c4e46
JH
2451}
2452
55ed8ca1
JH
2453int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2454{
2455 struct link_key *key;
2456
2457 key = hci_find_link_key(hdev, bdaddr);
2458 if (!key)
2459 return -ENOENT;
2460
6ed93dc6 2461 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2462
0378b597
JH
2463 list_del_rcu(&key->list);
2464 kfree_rcu(key, rcu);
55ed8ca1
JH
2465
2466 return 0;
2467}
2468
e0b2b27e 2469int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2470{
970d0f1b 2471 struct smp_ltk *k;
c51ffa0b 2472 int removed = 0;
b899efaf 2473
970d0f1b 2474 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2475 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2476 continue;
2477
6ed93dc6 2478 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2479
970d0f1b
JH
2480 list_del_rcu(&k->list);
2481 kfree_rcu(k, rcu);
c51ffa0b 2482 removed++;
b899efaf
VCG
2483 }
2484
c51ffa0b 2485 return removed ? 0 : -ENOENT;
b899efaf
VCG
2486}
2487
a7ec7338
JH
2488void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2489{
adae20cb 2490 struct smp_irk *k;
a7ec7338 2491
adae20cb 2492 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2493 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2494 continue;
2495
2496 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2497
adae20cb
JH
2498 list_del_rcu(&k->list);
2499 kfree_rcu(k, rcu);
a7ec7338
JH
2500 }
2501}
2502
6bd32326 2503/* HCI command timer function */
65cc2b49 2504static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2505{
65cc2b49
MH
2506 struct hci_dev *hdev = container_of(work, struct hci_dev,
2507 cmd_timer.work);
6bd32326 2508
bda4f23a
AE
2509 if (hdev->sent_cmd) {
2510 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2511 u16 opcode = __le16_to_cpu(sent->opcode);
2512
2513 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2514 } else {
2515 BT_ERR("%s command tx timeout", hdev->name);
2516 }
2517
6bd32326 2518 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2519 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2520}
2521
2763eda6 2522struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2523 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2524{
2525 struct oob_data *data;
2526
6928a924
JH
2527 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2528 if (bacmp(bdaddr, &data->bdaddr) != 0)
2529 continue;
2530 if (data->bdaddr_type != bdaddr_type)
2531 continue;
2532 return data;
2533 }
2763eda6
SJ
2534
2535 return NULL;
2536}
2537
6928a924
JH
2538int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2539 u8 bdaddr_type)
2763eda6
SJ
2540{
2541 struct oob_data *data;
2542
6928a924 2543 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2544 if (!data)
2545 return -ENOENT;
2546
6928a924 2547 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2548
2549 list_del(&data->list);
2550 kfree(data);
2551
2552 return 0;
2553}
2554
35f7498a 2555void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2556{
2557 struct oob_data *data, *n;
2558
2559 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2560 list_del(&data->list);
2561 kfree(data);
2562 }
2763eda6
SJ
2563}
2564
0798872e 2565int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2566 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2567 u8 *hash256, u8 *rand256)
2763eda6
SJ
2568{
2569 struct oob_data *data;
2570
6928a924 2571 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2572 if (!data) {
0a14ab41 2573 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2574 if (!data)
2575 return -ENOMEM;
2576
2577 bacpy(&data->bdaddr, bdaddr);
6928a924 2578 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2579 list_add(&data->list, &hdev->remote_oob_data);
2580 }
2581
81328d5c
JH
2582 if (hash192 && rand192) {
2583 memcpy(data->hash192, hash192, sizeof(data->hash192));
2584 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2585 if (hash256 && rand256)
2586 data->present = 0x03;
81328d5c
JH
2587 } else {
2588 memset(data->hash192, 0, sizeof(data->hash192));
2589 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2590 if (hash256 && rand256)
2591 data->present = 0x02;
2592 else
2593 data->present = 0x00;
0798872e
MH
2594 }
2595
81328d5c
JH
2596 if (hash256 && rand256) {
2597 memcpy(data->hash256, hash256, sizeof(data->hash256));
2598 memcpy(data->rand256, rand256, sizeof(data->rand256));
2599 } else {
2600 memset(data->hash256, 0, sizeof(data->hash256));
2601 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2602 if (hash192 && rand192)
2603 data->present = 0x01;
81328d5c 2604 }
0798872e 2605
6ed93dc6 2606 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2607
2608 return 0;
2609}
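The net effect on data->present is the following encoding of which value sets are valid:

	/* data->present  meaning
	 * 0x00           no valid OOB data
	 * 0x01           P-192 values only (hash192/rand192)
	 * 0x02           P-256 values only (hash256/rand256)
	 * 0x03           both P-192 and P-256 values
	 */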
2610
dcc36c16 2611struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2612 bdaddr_t *bdaddr, u8 type)
b2a66aad 2613{
8035ded4 2614 struct bdaddr_list *b;
b2a66aad 2615
dcc36c16 2616 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2617 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2618 return b;
b9ee0a78 2619 }
b2a66aad
AJ
2620
2621 return NULL;
2622}
2623
dcc36c16 2624void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
2625{
2626 struct list_head *p, *n;
2627
dcc36c16 2628 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 2629 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2630
2631 list_del(p);
2632 kfree(b);
2633 }
b2a66aad
AJ
2634}
2635
dcc36c16 2636int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2637{
2638 struct bdaddr_list *entry;
b2a66aad 2639
b9ee0a78 2640 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2641 return -EBADF;
2642
dcc36c16 2643 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2644 return -EEXIST;
b2a66aad 2645
27f70f3e 2646 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2647 if (!entry)
2648 return -ENOMEM;
b2a66aad
AJ
2649
2650 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2651 entry->bdaddr_type = type;
b2a66aad 2652
dcc36c16 2653 list_add(&entry->list, list);
b2a66aad 2654
2a8357f2 2655 return 0;
b2a66aad
AJ
2656}
2657
dcc36c16 2658int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2659{
2660 struct bdaddr_list *entry;
b2a66aad 2661
35f7498a 2662 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2663 hci_bdaddr_list_clear(list);
35f7498a
JH
2664 return 0;
2665 }
b2a66aad 2666
dcc36c16 2667 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2668 if (!entry)
2669 return -ENOENT;
2670
2671 list_del(&entry->list);
2672 kfree(entry);
2673
2674 return 0;
2675}
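A sketch of typical in-kernel use of these list helpers, here against the hdev->whitelist that hci_alloc_dev() initialises (bdaddr and any locking context are the caller's responsibility):

	/* Sketch: accept connections from one more LE peer */
	int err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
				      BDADDR_LE_PUBLIC);
	if (err && err != -EEXIST)
		BT_ERR("Failed to add %pMR to whitelist (%d)", &bdaddr, err);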
2676
15819a70
AG
2677/* This function requires the caller holds hdev->lock */
2678struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2679 bdaddr_t *addr, u8 addr_type)
2680{
2681 struct hci_conn_params *params;
2682
738f6185
JH
2683 /* The conn params list only contains identity addresses */
2684 if (!hci_is_identity_address(addr, addr_type))
2685 return NULL;
2686
15819a70
AG
2687 list_for_each_entry(params, &hdev->le_conn_params, list) {
2688 if (bacmp(&params->addr, addr) == 0 &&
2689 params->addr_type == addr_type) {
2690 return params;
2691 }
2692 }
2693
2694 return NULL;
2695}
2696
4b10966f 2697/* This function requires the caller holds hdev->lock */
501f8827
JH
2698struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2699 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2700{
912b42ef 2701 struct hci_conn_params *param;
a9b0a04c 2702
738f6185
JH
2703 /* The list only contains identity addresses */
2704 if (!hci_is_identity_address(addr, addr_type))
2705 return NULL;
a9b0a04c 2706
501f8827 2707 list_for_each_entry(param, list, action) {
912b42ef
JH
2708 if (bacmp(&param->addr, addr) == 0 &&
2709 param->addr_type == addr_type)
2710 return param;
4b10966f
MH
2711 }
2712
2713 return NULL;
a9b0a04c
AG
2714}
2715
15819a70 2716/* This function requires the caller holds hdev->lock */
51d167c0
MH
2717struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2718 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2719{
2720 struct hci_conn_params *params;
2721
c46245b3 2722 if (!hci_is_identity_address(addr, addr_type))
51d167c0 2723 return NULL;
a9b0a04c 2724
15819a70 2725 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2726 if (params)
51d167c0 2727 return params;
15819a70
AG
2728
2729 params = kzalloc(sizeof(*params), GFP_KERNEL);
2730 if (!params) {
2731 BT_ERR("Out of memory");
51d167c0 2732 return NULL;
15819a70
AG
2733 }
2734
2735 bacpy(&params->addr, addr);
2736 params->addr_type = addr_type;
cef952ce
AG
2737
2738 list_add(&params->list, &hdev->le_conn_params);
93450c75 2739 INIT_LIST_HEAD(&params->action);
cef952ce 2740
bf5b3c8b
MH
2741 params->conn_min_interval = hdev->le_conn_min_interval;
2742 params->conn_max_interval = hdev->le_conn_max_interval;
2743 params->conn_latency = hdev->le_conn_latency;
2744 params->supervision_timeout = hdev->le_supv_timeout;
2745 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2746
2747 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2748
51d167c0 2749 return params;
bf5b3c8b
MH
2750}
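A short usage sketch for the helper above, marking a peer for automatic reconnection (hdev->lock must be held, per the comment):

	struct hci_conn_params *params;

	hci_dev_lock(hdev);
	params = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC);
	if (params)
		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
	hci_dev_unlock(hdev);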
2751
f6c63249 2752static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2753{
f8aaf9b6 2754 if (params->conn) {
f161dd41 2755 hci_conn_drop(params->conn);
f8aaf9b6
JH
2756 hci_conn_put(params->conn);
2757 }
f161dd41 2758
95305baa 2759 list_del(&params->action);
15819a70
AG
2760 list_del(&params->list);
2761 kfree(params);
f6c63249
JH
2762}
2763
2764/* This function requires the caller holds hdev->lock */
2765void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2766{
2767 struct hci_conn_params *params;
2768
2769 params = hci_conn_params_lookup(hdev, addr, addr_type);
2770 if (!params)
2771 return;
2772
2773 hci_conn_params_free(params);
15819a70 2774
95305baa
JH
2775 hci_update_background_scan(hdev);
2776
15819a70
AG
2777 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2778}
2779
2780/* This function requires the caller holds hdev->lock */
55af49a8 2781void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2782{
2783 struct hci_conn_params *params, *tmp;
2784
2785 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2786 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2787 continue;
15819a70
AG
2788 list_del(&params->list);
2789 kfree(params);
2790 }
2791
55af49a8 2792 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2793}
2794
2795/* This function requires the caller holds hdev->lock */
373110c5 2796void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2797{
15819a70 2798 struct hci_conn_params *params, *tmp;
77a77a30 2799
f6c63249
JH
2800 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2801 hci_conn_params_free(params);
77a77a30 2802
a4790dbd 2803 hci_update_background_scan(hdev);
77a77a30 2804
15819a70 2805 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2806}
2807
1904a853 2808static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7ba8b4be 2809{
4c87eaab
AG
2810 if (status) {
2811 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2812
4c87eaab
AG
2813 hci_dev_lock(hdev);
2814 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2815 hci_dev_unlock(hdev);
2816 return;
2817 }
7ba8b4be
AG
2818}
2819
1904a853
MH
2820static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2821 u16 opcode)
7ba8b4be 2822{
4c87eaab
AG
2823 /* General inquiry access code (GIAC) */
2824 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2825 struct hci_request req;
2826 struct hci_cp_inquiry cp;
7ba8b4be
AG
2827 int err;
2828
4c87eaab
AG
2829 if (status) {
2830 BT_ERR("Failed to disable LE scanning: status %d", status);
2831 return;
2832 }
7ba8b4be 2833
2d28cfe7
JP
2834 hdev->discovery.scan_start = 0;
2835
4c87eaab
AG
2836 switch (hdev->discovery.type) {
2837 case DISCOV_TYPE_LE:
2838 hci_dev_lock(hdev);
2839 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2840 hci_dev_unlock(hdev);
2841 break;
7ba8b4be 2842
4c87eaab
AG
2843 case DISCOV_TYPE_INTERLEAVED:
2844 hci_req_init(&req, hdev);
7ba8b4be 2845
4c87eaab
AG
2846 memset(&cp, 0, sizeof(cp));
2847 memcpy(&cp.lap, lap, sizeof(cp.lap));
2848 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2849 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 2850
4c87eaab 2851 hci_dev_lock(hdev);
7dbfac1d 2852
4c87eaab 2853 hci_inquiry_cache_flush(hdev);
7dbfac1d 2854
4c87eaab
AG
2855 err = hci_req_run(&req, inquiry_complete);
2856 if (err) {
2857 BT_ERR("Inquiry request failed: err %d", err);
2858 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2859 }
7dbfac1d 2860
4c87eaab
AG
2861 hci_dev_unlock(hdev);
2862 break;
7dbfac1d 2863 }
7dbfac1d
AG
2864}
2865
7ba8b4be
AG
2866static void le_scan_disable_work(struct work_struct *work)
2867{
2868 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2869 le_scan_disable.work);
4c87eaab
AG
2870 struct hci_request req;
2871 int err;
7ba8b4be
AG
2872
2873 BT_DBG("%s", hdev->name);
2874
2d28cfe7
JP
2875 cancel_delayed_work_sync(&hdev->le_scan_restart);
2876
4c87eaab 2877 hci_req_init(&req, hdev);
28b75a89 2878
b1efcc28 2879 hci_req_add_le_scan_disable(&req);
28b75a89 2880
4c87eaab
AG
2881 err = hci_req_run(&req, le_scan_disable_work_complete);
2882 if (err)
2883 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2884}
2885
2d28cfe7
JP
2886static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2887 u16 opcode)
2888{
2889 unsigned long timeout, duration, scan_start, now;
2890
2891 BT_DBG("%s", hdev->name);
2892
2893 if (status) {
2894 BT_ERR("Failed to restart LE scan: status %d", status);
2895 return;
2896 }
2897
2898 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2899 !hdev->discovery.scan_start)
2900 return;
2901
2902 /* When the scan was started, hdev->le_scan_disable was queued to
2903 * run 'duration' jiffies after scan_start. During a scan restart
2904 * this job is canceled, so we need to queue it again with the
2905 * proper timeout to make sure the scan does not run indefinitely.
2906 */
2907 duration = hdev->discovery.scan_duration;
2908 scan_start = hdev->discovery.scan_start;
2909 now = jiffies;
2910 if (now - scan_start <= duration) {
2911 int elapsed;
2912
2913 if (now >= scan_start)
2914 elapsed = now - scan_start;
2915 else
2916 elapsed = ULONG_MAX - scan_start + now;
2917
2918 timeout = duration - elapsed;
2919 } else {
2920 timeout = 0;
2921 }
2922 queue_delayed_work(hdev->workqueue,
2923 &hdev->le_scan_disable, timeout);
2924}
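A worked example of the wrap-around handling above: with duration = 1000 jiffies, scan_start = ULONG_MAX - 50 and now = 150, the counter has wrapped, so the else branch computes elapsed = ULONG_MAX - scan_start + now = 50 + 150 = 200, and the disable job is re-queued with timeout = 1000 - 200 = 800 jiffies.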
2925
2926static void le_scan_restart_work(struct work_struct *work)
2927{
2928 struct hci_dev *hdev = container_of(work, struct hci_dev,
2929 le_scan_restart.work);
2930 struct hci_request req;
2931 struct hci_cp_le_set_scan_enable cp;
2932 int err;
2933
2934 BT_DBG("%s", hdev->name);
2935
2936 /* If controller is not scanning we are done. */
2937 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2938 return;
2939
2940 hci_req_init(&req, hdev);
2941
2942 hci_req_add_le_scan_disable(&req);
2943
2944 memset(&cp, 0, sizeof(cp));
2945 cp.enable = LE_SCAN_ENABLE;
2946 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2947 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2948
2949 err = hci_req_run(&req, le_scan_restart_work_complete);
2950 if (err)
2951 BT_ERR("Restart LE scan request failed: err %d", err);
2952}
2953
a1f4c318
JH
2954/* Copy the Identity Address of the controller.
2955 *
2956 * If the controller has a public BD_ADDR, then by default use that one.
2957 * If this is an LE-only controller without a public address, default to
2958 * the static random address.
2959 *
2960 * For debugging purposes it is possible to force controllers with a
2961 * public address to use the static random address instead.
50b5b952
MH
2962 *
2963 * In case BR/EDR has been disabled on a dual-mode controller and
2964 * userspace has configured a static address, then that address
2965 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
2966 */
2967void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2968 u8 *bdaddr_type)
2969{
111902f7 2970 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
50b5b952
MH
2971 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2972 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2973 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
2974 bacpy(bdaddr, &hdev->static_addr);
2975 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2976 } else {
2977 bacpy(bdaddr, &hdev->bdaddr);
2978 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2979 }
2980}
2981
9be0dab7
DH
2982/* Alloc HCI device */
2983struct hci_dev *hci_alloc_dev(void)
2984{
2985 struct hci_dev *hdev;
2986
27f70f3e 2987 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
2988 if (!hdev)
2989 return NULL;
2990
b1b813d4
DH
2991 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2992 hdev->esco_type = (ESCO_HV1);
2993 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
2994 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2995 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 2996 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
2997 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2998 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2999
b1b813d4
DH
3000 hdev->sniff_max_interval = 800;
3001 hdev->sniff_min_interval = 80;
3002
3f959d46 3003 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3004 hdev->le_adv_min_interval = 0x0800;
3005 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3006 hdev->le_scan_interval = 0x0060;
3007 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3008 hdev->le_conn_min_interval = 0x0028;
3009 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3010 hdev->le_conn_latency = 0x0000;
3011 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3012 hdev->le_def_tx_len = 0x001b;
3013 hdev->le_def_tx_time = 0x0148;
3014 hdev->le_max_tx_len = 0x001b;
3015 hdev->le_max_tx_time = 0x0148;
3016 hdev->le_max_rx_len = 0x001b;
3017 hdev->le_max_rx_time = 0x0148;
bef64738 3018
d6bfd59c 3019 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3020 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3021 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3022 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3023
b1b813d4
DH
3024 mutex_init(&hdev->lock);
3025 mutex_init(&hdev->req_lock);
3026
3027 INIT_LIST_HEAD(&hdev->mgmt_pending);
3028 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3029 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3030 INIT_LIST_HEAD(&hdev->uuids);
3031 INIT_LIST_HEAD(&hdev->link_keys);
3032 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3033 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3034 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3035 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3036 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3037 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3038 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3039 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3040
3041 INIT_WORK(&hdev->rx_work, hci_rx_work);
3042 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3043 INIT_WORK(&hdev->tx_work, hci_tx_work);
3044 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3045 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 3046
b1b813d4
DH
3047 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3048 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3049 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2d28cfe7 3050 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
b1b813d4 3051
b1b813d4
DH
3052 skb_queue_head_init(&hdev->rx_q);
3053 skb_queue_head_init(&hdev->cmd_q);
3054 skb_queue_head_init(&hdev->raw_q);
3055
3056 init_waitqueue_head(&hdev->req_wait_q);
3057
65cc2b49 3058 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3059
b1b813d4
DH
3060 hci_init_sysfs(hdev);
3061 discovery_init(hdev);
9be0dab7
DH
3062
3063 return hdev;
3064}
3065EXPORT_SYMBOL(hci_alloc_dev);
3066
3067/* Free HCI device */
3068void hci_free_dev(struct hci_dev *hdev)
3069{
9be0dab7
DH
3070 /* will free via device release */
3071 put_device(&hdev->dev);
3072}
3073EXPORT_SYMBOL(hci_free_dev);
3074
1da177e4
LT
3075/* Register HCI device */
3076int hci_register_dev(struct hci_dev *hdev)
3077{
b1b813d4 3078 int id, error;
1da177e4 3079
74292d5a 3080 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3081 return -EINVAL;
3082
08add513
MM
3083 /* Do not allow HCI_AMP devices to register at index 0,
3084 * so the index can be used as the AMP controller ID.
3085 */
3df92b31
SL
3086 switch (hdev->dev_type) {
3087 case HCI_BREDR:
3088 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3089 break;
3090 case HCI_AMP:
3091 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3092 break;
3093 default:
3094 return -EINVAL;
1da177e4 3095 }
8e87d142 3096
3df92b31
SL
3097 if (id < 0)
3098 return id;
3099
1da177e4
LT
3100 sprintf(hdev->name, "hci%d", id);
3101 hdev->id = id;
2d8b3a11
AE
3102
3103 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3104
d8537548
KC
3105 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3106 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3107 if (!hdev->workqueue) {
3108 error = -ENOMEM;
3109 goto err;
3110 }
f48fd9c8 3111
d8537548
KC
3112 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3113 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3114 if (!hdev->req_workqueue) {
3115 destroy_workqueue(hdev->workqueue);
3116 error = -ENOMEM;
3117 goto err;
3118 }
3119
0153e2ec
MH
3120 if (!IS_ERR_OR_NULL(bt_debugfs))
3121 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3122
bdc3e0f1
MH
3123 dev_set_name(&hdev->dev, "%s", hdev->name);
3124
3125 error = device_add(&hdev->dev);
33ca954d 3126 if (error < 0)
54506918 3127 goto err_wqueue;
1da177e4 3128
611b30f7 3129 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3130 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3131 hdev);
611b30f7
MH
3132 if (hdev->rfkill) {
3133 if (rfkill_register(hdev->rfkill) < 0) {
3134 rfkill_destroy(hdev->rfkill);
3135 hdev->rfkill = NULL;
3136 }
3137 }
3138
5e130367
JH
3139 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3140 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3141
a8b2d5c2 3142 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3143 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3144
01cd3404 3145 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3146 /* Assume BR/EDR support until proven otherwise (such as
3147 * through reading supported features during init).
3148 */
3149 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3150 }
ce2be9ac 3151
fcee3377
GP
3152 write_lock(&hci_dev_list_lock);
3153 list_add(&hdev->list, &hci_dev_list);
3154 write_unlock(&hci_dev_list_lock);
3155
4a964404
MH
3156 /* Devices that are marked for raw-only usage are unconfigured
3157 * and should not be included in normal operation.
fee746b0
MH
3158 */
3159 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 3160 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 3161
1da177e4 3162 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3163 hci_dev_hold(hdev);
1da177e4 3164
19202573 3165 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3166
1da177e4 3167 return id;
f48fd9c8 3168
33ca954d
DH
3169err_wqueue:
3170 destroy_workqueue(hdev->workqueue);
6ead1bbc 3171 destroy_workqueue(hdev->req_workqueue);
33ca954d 3172err:
3df92b31 3173 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3174
33ca954d 3175 return error;
1da177e4
LT
3176}
3177EXPORT_SYMBOL(hci_register_dev);
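For driver authors, the open/close/send contract checked at the top of hci_register_dev() looks roughly like this hypothetical transport driver (the foo_* names are illustrative only; real drivers such as btusb follow the same shape):

	static int foo_open(struct hci_dev *hdev)
	{
		return 0;	/* power up the transport */
	}

	static int foo_close(struct hci_dev *hdev)
	{
		return 0;	/* power it back down */
	}

	static int foo_send(struct hci_dev *hdev, struct sk_buff *skb)
	{
		/* hand the frame to the hardware; consume the skb */
		kfree_skb(skb);
		return 0;
	}

	static int foo_setup_hci(void)
	{
		struct hci_dev *hdev = hci_alloc_dev();
		int err;

		if (!hdev)
			return -ENOMEM;

		hdev->bus   = HCI_USB;
		hdev->open  = foo_open;
		hdev->close = foo_close;
		hdev->send  = foo_send;

		err = hci_register_dev(hdev);
		if (err < 0) {
			hci_free_dev(hdev);
			return err;
		}
		return 0;
	}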
3178
3179/* Unregister HCI device */
59735631 3180void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3181{
3df92b31 3182 int i, id;
ef222013 3183
c13854ce 3184 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3185
94324962
JH
3186 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3187
3df92b31
SL
3188 id = hdev->id;
3189
f20d09d5 3190 write_lock(&hci_dev_list_lock);
1da177e4 3191 list_del(&hdev->list);
f20d09d5 3192 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3193
3194 hci_dev_do_close(hdev);
3195
cd4c5391 3196 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3197 kfree_skb(hdev->reassembly[i]);
3198
b9b5ef18
GP
3199 cancel_work_sync(&hdev->power_on);
3200
ab81cbf9 3201 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
3202 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3203 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 3204 hci_dev_lock(hdev);
744cf19e 3205 mgmt_index_removed(hdev);
09fd0de5 3206 hci_dev_unlock(hdev);
56e5cb86 3207 }
ab81cbf9 3208
2e58ef3e
JH
3209 /* mgmt_index_removed should take care of emptying the
3210 * pending list */
3211 BUG_ON(!list_empty(&hdev->mgmt_pending));
3212
1da177e4
LT
3213 hci_notify(hdev, HCI_DEV_UNREG);
3214
611b30f7
MH
3215 if (hdev->rfkill) {
3216 rfkill_unregister(hdev->rfkill);
3217 rfkill_destroy(hdev->rfkill);
3218 }
3219
bdc3e0f1 3220 device_del(&hdev->dev);
147e2d59 3221
0153e2ec
MH
3222 debugfs_remove_recursive(hdev->debugfs);
3223
f48fd9c8 3224 destroy_workqueue(hdev->workqueue);
6ead1bbc 3225 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3226
09fd0de5 3227 hci_dev_lock(hdev);
dcc36c16 3228 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3229 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3230 hci_uuids_clear(hdev);
55ed8ca1 3231 hci_link_keys_clear(hdev);
b899efaf 3232 hci_smp_ltks_clear(hdev);
970c4e46 3233 hci_smp_irks_clear(hdev);
2763eda6 3234 hci_remote_oob_data_clear(hdev);
dcc36c16 3235 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 3236 hci_conn_params_clear_all(hdev);
22078800 3237 hci_discovery_filter_clear(hdev);
09fd0de5 3238 hci_dev_unlock(hdev);
e2e0cacb 3239
dc946bd8 3240 hci_dev_put(hdev);
3df92b31
SL
3241
3242 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3243}
3244EXPORT_SYMBOL(hci_unregister_dev);
3245
3246/* Suspend HCI device */
3247int hci_suspend_dev(struct hci_dev *hdev)
3248{
3249 hci_notify(hdev, HCI_DEV_SUSPEND);
3250 return 0;
3251}
3252EXPORT_SYMBOL(hci_suspend_dev);
3253
3254/* Resume HCI device */
3255int hci_resume_dev(struct hci_dev *hdev)
3256{
3257 hci_notify(hdev, HCI_DEV_RESUME);
3258 return 0;
3259}
3260EXPORT_SYMBOL(hci_resume_dev);
3261
75e0569f
MH
3262/* Reset HCI device */
3263int hci_reset_dev(struct hci_dev *hdev)
3264{
3265 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3266 struct sk_buff *skb;
3267
3268 skb = bt_skb_alloc(3, GFP_ATOMIC);
3269 if (!skb)
3270 return -ENOMEM;
3271
3272 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3273 memcpy(skb_put(skb, 3), hw_err, 3);
3274
3275 /* Send Hardware Error to upper stack */
3276 return hci_recv_frame(hdev, skb);
3277}
3278EXPORT_SYMBOL(hci_reset_dev);
3279
76bca880 3280/* Receive frame from HCI drivers */
e1a26170 3281int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3282{
76bca880 3283 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3284 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3285 kfree_skb(skb);
3286 return -ENXIO;
3287 }
3288
d82603c6 3289 /* Incoming skb */
76bca880
MH
3290 bt_cb(skb)->incoming = 1;
3291
3292 /* Time stamp */
3293 __net_timestamp(skb);
3294
76bca880 3295 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3296 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3297
76bca880
MH
3298 return 0;
3299}
3300EXPORT_SYMBOL(hci_recv_frame);
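The matching receive-side sketch for a driver: allocate an skb, tag its packet type, and hand it to the core (buf/len are assumed to hold one complete HCI event from the hardware):

	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	return hci_recv_frame(hdev, skb);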
3301
33e882a5 3302static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3303 int count, __u8 index)
33e882a5
SS
3304{
3305 int len = 0;
3306 int hlen = 0;
3307 int remain = count;
3308 struct sk_buff *skb;
3309 struct bt_skb_cb *scb;
3310
3311 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3312 index >= NUM_REASSEMBLY)
33e882a5
SS
3313 return -EILSEQ;
3314
3315 skb = hdev->reassembly[index];
3316
3317 if (!skb) {
3318 switch (type) {
3319 case HCI_ACLDATA_PKT:
3320 len = HCI_MAX_FRAME_SIZE;
3321 hlen = HCI_ACL_HDR_SIZE;
3322 break;
3323 case HCI_EVENT_PKT:
3324 len = HCI_MAX_EVENT_SIZE;
3325 hlen = HCI_EVENT_HDR_SIZE;
3326 break;
3327 case HCI_SCODATA_PKT:
3328 len = HCI_MAX_SCO_SIZE;
3329 hlen = HCI_SCO_HDR_SIZE;
3330 break;
3331 }
3332
1e429f38 3333 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3334 if (!skb)
3335 return -ENOMEM;
3336
3337 scb = (void *) skb->cb;
3338 scb->expect = hlen;
3339 scb->pkt_type = type;
3340
33e882a5
SS
3341 hdev->reassembly[index] = skb;
3342 }
3343
3344 while (count) {
3345 scb = (void *) skb->cb;
89bb46d0 3346 len = min_t(uint, scb->expect, count);
33e882a5
SS
3347
3348 memcpy(skb_put(skb, len), data, len);
3349
3350 count -= len;
3351 data += len;
3352 scb->expect -= len;
3353 remain = count;
3354
3355 switch (type) {
3356 case HCI_EVENT_PKT:
3357 if (skb->len == HCI_EVENT_HDR_SIZE) {
3358 struct hci_event_hdr *h = hci_event_hdr(skb);
3359 scb->expect = h->plen;
3360
3361 if (skb_tailroom(skb) < scb->expect) {
3362 kfree_skb(skb);
3363 hdev->reassembly[index] = NULL;
3364 return -ENOMEM;
3365 }
3366 }
3367 break;
3368
3369 case HCI_ACLDATA_PKT:
3370 if (skb->len == HCI_ACL_HDR_SIZE) {
3371 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3372 scb->expect = __le16_to_cpu(h->dlen);
3373
3374 if (skb_tailroom(skb) < scb->expect) {
3375 kfree_skb(skb);
3376 hdev->reassembly[index] = NULL;
3377 return -ENOMEM;
3378 }
3379 }
3380 break;
3381
3382 case HCI_SCODATA_PKT:
3383 if (skb->len == HCI_SCO_HDR_SIZE) {
3384 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3385 scb->expect = h->dlen;
3386
3387 if (skb_tailroom(skb) < scb->expect) {
3388 kfree_skb(skb);
3389 hdev->reassembly[index] = NULL;
3390 return -ENOMEM;
3391 }
3392 }
3393 break;
3394 }
3395
3396 if (scb->expect == 0) {
3397 /* Complete frame */
3398
3399 bt_cb(skb)->pkt_type = type;
e1a26170 3400 hci_recv_frame(hdev, skb);
33e882a5
SS
3401
3402 hdev->reassembly[index] = NULL;
3403 return remain;
3404 }
3405 }
3406
3407 return remain;
3408}
3409
99811510
SS
3410#define STREAM_REASSEMBLY 0
3411
3412int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3413{
3414 int type;
3415 int rem = 0;
3416
da5f6c37 3417 while (count) {
99811510
SS
3418 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3419
3420 if (!skb) {
3421 struct { char type; } *pkt;
3422
3423 /* Start of the frame */
3424 pkt = data;
3425 type = pkt->type;
3426
3427 data++;
3428 count--;
3429 } else
3430 type = bt_cb(skb)->pkt_type;
3431
1e429f38 3432 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 3433 STREAM_REASSEMBLY);
99811510
SS
3434 if (rem < 0)
3435 return rem;
3436
3437 data += (count - rem);
3438 count = rem;
f81c6224 3439 }
99811510
SS
3440
3441 return rem;
3442}
3443EXPORT_SYMBOL(hci_recv_stream_fragment);
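The stream variant assumes H:4 framing, i.e. the first byte of every frame is the packet type; a UART driver typically just forwards its raw receive buffer (a sketch, foo_uart_recv being hypothetical):

	static int foo_uart_recv(struct hci_dev *hdev, void *data, int count)
	{
		/* Feed raw bytes off the wire into the reassembler */
		return hci_recv_stream_fragment(hdev, data, count);
	}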
3444
1da177e4
LT
3445/* ---- Interface to upper protocols ---- */
3446
1da177e4
LT
3447int hci_register_cb(struct hci_cb *cb)
3448{
3449 BT_DBG("%p name %s", cb, cb->name);
3450
f20d09d5 3451 write_lock(&hci_cb_list_lock);
1da177e4 3452 list_add(&cb->list, &hci_cb_list);
f20d09d5 3453 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3454
3455 return 0;
3456}
3457EXPORT_SYMBOL(hci_register_cb);
3458
3459int hci_unregister_cb(struct hci_cb *cb)
3460{
3461 BT_DBG("%p name %s", cb, cb->name);
3462
f20d09d5 3463 write_lock(&hci_cb_list_lock);
1da177e4 3464 list_del(&cb->list);
f20d09d5 3465 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3466
3467 return 0;
3468}
3469EXPORT_SYMBOL(hci_unregister_cb);
3470
51086991 3471static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3472{
cdc52faa
MH
3473 int err;
3474
0d48d939 3475 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3476
cd82e61c
MH
3477 /* Time stamp */
3478 __net_timestamp(skb);
1da177e4 3479
cd82e61c
MH
3480 /* Send copy to monitor */
3481 hci_send_to_monitor(hdev, skb);
3482
3483 if (atomic_read(&hdev->promisc)) {
3484 /* Send copy to the sockets */
470fe1b5 3485 hci_send_to_sock(hdev, skb);
1da177e4
LT
3486 }
3487
3488 /* Get rid of skb owner, prior to sending to the driver. */
3489 skb_orphan(skb);
3490
cdc52faa
MH
3491 err = hdev->send(hdev, skb);
3492 if (err < 0) {
3493 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3494 kfree_skb(skb);
3495 }
1da177e4
LT
3496}
3497
899de765
MH
3498bool hci_req_pending(struct hci_dev *hdev)
3499{
3500 return (hdev->req_status == HCI_REQ_PEND);
3501}
3502
1ca3a9d0 3503/* Send HCI command */
07dc93dd
JH
3504int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3505 const void *param)
1ca3a9d0
JH
3506{
3507 struct sk_buff *skb;
3508
3509 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3510
3511 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3512 if (!skb) {
3513 BT_ERR("%s no memory for command", hdev->name);
3514 return -ENOMEM;
3515 }
3516
49c922bb 3517 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3518 * single-command requests.
3519 */
3520 bt_cb(skb)->req.start = true;
3521
1da177e4 3522 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3523 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3524
3525 return 0;
3526}
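Typical in-stack usage, queueing a parameterless HCI_Reset (a sketch):

	int err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	if (err)
		BT_ERR("%s failed to queue reset (%d)", hdev->name, err);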
1da177e4
LT
3527
3528/* Get data from the previously sent command */
a9de9248 3529void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3530{
3531 struct hci_command_hdr *hdr;
3532
3533 if (!hdev->sent_cmd)
3534 return NULL;
3535
3536 hdr = (void *) hdev->sent_cmd->data;
3537
a9de9248 3538 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3539 return NULL;
3540
f0e09510 3541 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3542
3543 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3544}
3545
3546/* Send ACL data */
3547static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3548{
3549 struct hci_acl_hdr *hdr;
3550 int len = skb->len;
3551
badff6d0
ACM
3552 skb_push(skb, HCI_ACL_HDR_SIZE);
3553 skb_reset_transport_header(skb);
9c70220b 3554 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3555 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3556 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3557}
3558
ee22be7e 3559static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3560 struct sk_buff *skb, __u16 flags)
1da177e4 3561{
ee22be7e 3562 struct hci_conn *conn = chan->conn;
1da177e4
LT
3563 struct hci_dev *hdev = conn->hdev;
3564 struct sk_buff *list;
3565
087bfd99
GP
3566 skb->len = skb_headlen(skb);
3567 skb->data_len = 0;
3568
3569 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3570
3571 switch (hdev->dev_type) {
3572 case HCI_BREDR:
3573 hci_add_acl_hdr(skb, conn->handle, flags);
3574 break;
3575 case HCI_AMP:
3576 hci_add_acl_hdr(skb, chan->handle, flags);
3577 break;
3578 default:
3579 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3580 return;
3581 }
087bfd99 3582
70f23020
AE
3583 list = skb_shinfo(skb)->frag_list;
3584 if (!list) {
1da177e4
LT
3585 /* Non-fragmented */
3586 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3587
73d80deb 3588 skb_queue_tail(queue, skb);
1da177e4
LT
3589 } else {
3590 /* Fragmented */
3591 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3592
3593 skb_shinfo(skb)->frag_list = NULL;
3594
9cfd5a23
JR
3595 /* Queue all fragments atomically. We need to use spin_lock_bh
3596 * here because of 6LoWPAN links, where this function is
3597 * called from softirq context and taking a normal spin lock
3598 * could cause deadlocks.
3599 */
3600 spin_lock_bh(&queue->lock);
1da177e4 3601
73d80deb 3602 __skb_queue_tail(queue, skb);
e702112f
AE
3603
3604 flags &= ~ACL_START;
3605 flags |= ACL_CONT;
1da177e4
LT
3606 do {
3607 skb = list; list = list->next;
8e87d142 3608
0d48d939 3609 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3610 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3611
3612 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3613
73d80deb 3614 __skb_queue_tail(queue, skb);
1da177e4
LT
3615 } while (list);
3616
9cfd5a23 3617 spin_unlock_bh(&queue->lock);
1da177e4 3618 }
73d80deb
LAD
3619}
3620
3621void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3622{
ee22be7e 3623 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3624
f0e09510 3625 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3626
ee22be7e 3627 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3628
3eff45ea 3629 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3630}
1da177e4
LT
3631
3632/* Send SCO data */
0d861d8b 3633void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3634{
3635 struct hci_dev *hdev = conn->hdev;
3636 struct hci_sco_hdr hdr;
3637
3638 BT_DBG("%s len %d", hdev->name, skb->len);
3639
aca3192c 3640 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3641 hdr.dlen = skb->len;
3642
badff6d0
ACM
3643 skb_push(skb, HCI_SCO_HDR_SIZE);
3644 skb_reset_transport_header(skb);
9c70220b 3645 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3646
0d48d939 3647 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3648
1da177e4 3649 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3650 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3651}
1da177e4
LT
3652
3653/* ---- HCI TX task (outgoing data) ---- */
3654
3655/* HCI Connection scheduler */
6039aa73
GP
3656static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3657 int *quote)
1da177e4
LT
3658{
3659 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3660 struct hci_conn *conn = NULL, *c;
abc5de8f 3661 unsigned int num = 0, min = ~0;
1da177e4 3662
8e87d142 3663 /* We don't have to lock device here. Connections are always
1da177e4 3664 * added and removed with TX task disabled. */
bf4c6325
GP
3665
3666 rcu_read_lock();
3667
3668 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3669 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3670 continue;
769be974
MH
3671
3672 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3673 continue;
3674
1da177e4
LT
3675 num++;
3676
3677 if (c->sent < min) {
3678 min = c->sent;
3679 conn = c;
3680 }
52087a79
LAD
3681
3682 if (hci_conn_num(hdev, type) == num)
3683 break;
1da177e4
LT
3684 }
3685
bf4c6325
GP
3686 rcu_read_unlock();
3687
1da177e4 3688 if (conn) {
6ed58ec5
VT
3689 int cnt, q;
3690
3691 switch (conn->type) {
3692 case ACL_LINK:
3693 cnt = hdev->acl_cnt;
3694 break;
3695 case SCO_LINK:
3696 case ESCO_LINK:
3697 cnt = hdev->sco_cnt;
3698 break;
3699 case LE_LINK:
3700 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3701 break;
3702 default:
3703 cnt = 0;
3704 BT_ERR("Unknown link type");
3705 }
3706
3707 q = cnt / num;
1da177e4
LT
3708 *quote = q ? q : 1;
3709 } else
3710 *quote = 0;
3711
3712 BT_DBG("conn %p quote %d", conn, *quote);
3713 return conn;
3714}
3715
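/* TX timeout handler: every connection of the given type that still
 * has unacknowledged packets (->sent != 0) is assumed stalled and is
 * disconnected to recover the controller's buffers.
 */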
6039aa73 3716static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3717{
3718 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3719 struct hci_conn *c;
1da177e4 3720
bae1f5d9 3721 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3722
bf4c6325
GP
3723 rcu_read_lock();
3724
1da177e4 3725 /* Kill stalled connections */
bf4c6325 3726 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3727 if (c->type == type && c->sent) {
6ed93dc6
AE
3728 BT_ERR("%s killing stalled connection %pMR",
3729 hdev->name, &c->dst);
bed71748 3730 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3731 }
3732 }
bf4c6325
GP
3733
3734 rcu_read_unlock();
1da177e4
LT
3735}
3736
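/* Channel-level variant of hci_low_sent(): consider only channels
 * whose queue head carries the highest priority currently waiting for
 * this link type, and among those pick the channel whose connection
 * has the fewest packets in flight. The quote is computed the same
 * way as in hci_low_sent().
 */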
6039aa73
GP
3737static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3738 int *quote)
1da177e4 3739{
73d80deb
LAD
3740 struct hci_conn_hash *h = &hdev->conn_hash;
3741 struct hci_chan *chan = NULL;
abc5de8f 3742 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3743 struct hci_conn *conn;
73d80deb
LAD
3744 int cnt, q, conn_num = 0;
3745
3746 BT_DBG("%s", hdev->name);
3747
bf4c6325
GP
3748 rcu_read_lock();
3749
3750 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3751 struct hci_chan *tmp;
3752
3753 if (conn->type != type)
3754 continue;
3755
3756 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3757 continue;
3758
3759 conn_num++;
3760
8192edef 3761 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3762 struct sk_buff *skb;
3763
3764 if (skb_queue_empty(&tmp->data_q))
3765 continue;
3766
3767 skb = skb_peek(&tmp->data_q);
3768 if (skb->priority < cur_prio)
3769 continue;
3770
3771 if (skb->priority > cur_prio) {
3772 num = 0;
3773 min = ~0;
3774 cur_prio = skb->priority;
3775 }
3776
3777 num++;
3778
3779 if (conn->sent < min) {
3780 min = conn->sent;
3781 chan = tmp;
3782 }
3783 }
3784
3785 if (hci_conn_num(hdev, type) == conn_num)
3786 break;
3787 }
3788
bf4c6325
GP
3789 rcu_read_unlock();
3790
73d80deb
LAD
3791 if (!chan)
3792 return NULL;
3793
3794 switch (chan->conn->type) {
3795 case ACL_LINK:
3796 cnt = hdev->acl_cnt;
3797 break;
bd1eb66b
AE
3798 case AMP_LINK:
3799 cnt = hdev->block_cnt;
3800 break;
73d80deb
LAD
3801 case SCO_LINK:
3802 case ESCO_LINK:
3803 cnt = hdev->sco_cnt;
3804 break;
3805 case LE_LINK:
3806 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3807 break;
3808 default:
3809 cnt = 0;
3810 BT_ERR("Unknown link type");
3811 }
3812
3813 q = cnt / num;
3814 *quote = q ? q : 1;
3815 BT_DBG("chan %p quote %d", chan, *quote);
3816 return chan;
3817}
3818
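/* Anti-starvation pass run after a scheduling round: channels that got
 * nothing sent (chan->sent == 0) but still have queued data get their
 * queue-head priority promoted to HCI_PRIO_MAX - 1; channels that did
 * send merely have their per-round counter reset.
 */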
02b20f0b
LAD
3819static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3820{
3821 struct hci_conn_hash *h = &hdev->conn_hash;
3822 struct hci_conn *conn;
3823 int num = 0;
3824
3825 BT_DBG("%s", hdev->name);
3826
bf4c6325
GP
3827 rcu_read_lock();
3828
3829 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3830 struct hci_chan *chan;
3831
3832 if (conn->type != type)
3833 continue;
3834
3835 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3836 continue;
3837
3838 num++;
3839
8192edef 3840 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3841 struct sk_buff *skb;
3842
3843 if (chan->sent) {
3844 chan->sent = 0;
3845 continue;
3846 }
3847
3848 if (skb_queue_empty(&chan->data_q))
3849 continue;
3850
3851 skb = skb_peek(&chan->data_q);
3852 if (skb->priority >= HCI_PRIO_MAX - 1)
3853 continue;
3854
3855 skb->priority = HCI_PRIO_MAX - 1;
3856
3857 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3858 skb->priority);
02b20f0b
LAD
3859 }
3860
3861 if (hci_conn_num(hdev, type) == num)
3862 break;
3863 }
bf4c6325
GP
3864
3865 rcu_read_unlock();
3866
02b20f0b
LAD
3867}
3868
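/* E.g. a 1024 byte ACL skb (4 byte ACL header plus 1020 bytes of
 * payload) on a controller with block_len == 255 occupies
 * DIV_ROUND_UP(1020, 255) = 4 controller data blocks.
 */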
b71d385a
AE
3869static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3870{
3871 /* Calculate count of blocks used by this packet */
3872 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3873}
3874
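/* cnt == 0 means the controller has stopped returning buffer credits;
 * once that condition has outlasted HCI_ACL_TX_TIMEOUT, treat the ACL
 * links as stalled and tear the stale connections down.
 */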
6039aa73 3875static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3876{
4a964404 3877 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
3878 /* ACL tx timeout must be longer than maximum
3879 * link supervision timeout (40.9 seconds) */
63d2bc1b 3880 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3881 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3882 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3883 }
63d2bc1b 3884}
1da177e4 3885
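/* Packet-based ACL scheduler: while buffer credits remain, pick the
 * best channel via hci_chan_sent() and drain up to its quote of
 * packets, stopping early if a lower-priority skb reaches the queue
 * head. If anything was sent, rerun the priority recalculation.
 */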
6039aa73 3886static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3887{
3888 unsigned int cnt = hdev->acl_cnt;
3889 struct hci_chan *chan;
3890 struct sk_buff *skb;
3891 int quote;
3892
3893 __check_timeout(hdev, cnt);
04837f64 3894
73d80deb 3895 while (hdev->acl_cnt &&
a8c5fb1a 3896 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3897 u32 priority = (skb_peek(&chan->data_q))->priority;
3898 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3899 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3900 skb->len, skb->priority);
73d80deb 3901
ec1cce24
LAD
3902 /* Stop if priority has changed */
3903 if (skb->priority < priority)
3904 break;
3905
3906 skb = skb_dequeue(&chan->data_q);
3907
73d80deb 3908 hci_conn_enter_active_mode(chan->conn,
04124681 3909 bt_cb(skb)->force_active);
04837f64 3910
57d17d70 3911 hci_send_frame(hdev, skb);
1da177e4
LT
3912 hdev->acl_last_tx = jiffies;
3913
3914 hdev->acl_cnt--;
73d80deb
LAD
3915 chan->sent++;
3916 chan->conn->sent++;
1da177e4
LT
3917 }
3918 }
02b20f0b
LAD
3919
3920 if (cnt != hdev->acl_cnt)
3921 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3922}
3923
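/* Block-based ACL scheduler, used with HCI_FLOW_CTL_MODE_BLOCK_BASED:
 * accounting is done in controller data blocks rather than packets,
 * so each skb consumes __get_blocks() credits from both the channel
 * quote and hdev->block_cnt.
 */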
6039aa73 3924static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3925{
63d2bc1b 3926 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3927 struct hci_chan *chan;
3928 struct sk_buff *skb;
3929 int quote;
bd1eb66b 3930 u8 type;
b71d385a 3931
63d2bc1b 3932 __check_timeout(hdev, cnt);
b71d385a 3933
bd1eb66b
AE
3934 BT_DBG("%s", hdev->name);
3935
3936 if (hdev->dev_type == HCI_AMP)
3937 type = AMP_LINK;
3938 else
3939 type = ACL_LINK;
3940
b71d385a 3941 while (hdev->block_cnt > 0 &&
bd1eb66b 3942 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3943 u32 priority = (skb_peek(&chan->data_q))->priority;
3944 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3945 int blocks;
3946
3947 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3948 skb->len, skb->priority);
b71d385a
AE
3949
3950 /* Stop if priority has changed */
3951 if (skb->priority < priority)
3952 break;
3953
3954 skb = skb_dequeue(&chan->data_q);
3955
3956 blocks = __get_blocks(hdev, skb);
3957 if (blocks > hdev->block_cnt)
3958 return;
3959
3960 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3961 bt_cb(skb)->force_active);
b71d385a 3962
57d17d70 3963 hci_send_frame(hdev, skb);
b71d385a
AE
3964 hdev->acl_last_tx = jiffies;
3965
3966 hdev->block_cnt -= blocks;
3967 quote -= blocks;
3968
3969 chan->sent += blocks;
3970 chan->conn->sent += blocks;
3971 }
3972 }
3973
3974 if (cnt != hdev->block_cnt)
bd1eb66b 3975 hci_prio_recalculate(hdev, type);
b71d385a
AE
3976}
3977
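/* Top-level ACL scheduler: bail out early when there are no links of
 * the type this controller carries (ACL_LINK on BR/EDR, AMP_LINK on
 * AMP), then dispatch on the controller's flow control mode.
 */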
6039aa73 3978static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3979{
3980 BT_DBG("%s", hdev->name);
3981
bd1eb66b
AE
3982 /* No ACL link over BR/EDR controller */
3983 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3984 return;
3985
3986 /* No AMP link over AMP controller */
3987 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3988 return;
3989
3990 switch (hdev->flow_ctl_mode) {
3991 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3992 hci_sched_acl_pkt(hdev);
3993 break;
3994
3995 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3996 hci_sched_acl_blk(hdev);
3997 break;
3998 }
3999}
4000
1da177e4 4001/* Schedule SCO */
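/* SCO and eSCO have no priority queues or block accounting: each
 * connection gets a round-robin quote from hci_low_sent() and its
 * data queue is drained directly.
 */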
6039aa73 4002static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4003{
4004 struct hci_conn *conn;
4005 struct sk_buff *skb;
4006 int quote;
4007
4008 BT_DBG("%s", hdev->name);
4009
52087a79
LAD
4010 if (!hci_conn_num(hdev, SCO_LINK))
4011 return;
4012
1da177e4
LT
4013 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4014 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4015 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4016 hci_send_frame(hdev, skb);
1da177e4
LT
4017
4018 conn->sent++;
4019 if (conn->sent == ~0)
4020 conn->sent = 0;
4021 }
4022 }
4023}
4024
6039aa73 4025static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4026{
4027 struct hci_conn *conn;
4028 struct sk_buff *skb;
4029 int quote;
4030
4031 BT_DBG("%s", hdev->name);
4032
52087a79
LAD
4033 if (!hci_conn_num(hdev, ESCO_LINK))
4034 return;
4035
8fc9ced3
GP
4036 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4037 &quote))) {
b6a0dc82
MH
4038 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4039 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4040 hci_send_frame(hdev, skb);
b6a0dc82
MH
4041
4042 conn->sent++;
4043 if (conn->sent == ~0)
4044 conn->sent = 0;
4045 }
4046 }
4047}
4048
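/* LE scheduler: controllers with a dedicated LE buffer pool report
 * le_pkts/le_mtu and are accounted through le_cnt; otherwise LE data
 * shares the ACL credits. The loop itself mirrors hci_sched_acl_pkt(),
 * including the stall check and the priority recalculation.
 */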
6039aa73 4049static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4050{
73d80deb 4051 struct hci_chan *chan;
6ed58ec5 4052 struct sk_buff *skb;
02b20f0b 4053 int quote, cnt, tmp;
6ed58ec5
VT
4054
4055 BT_DBG("%s", hdev->name);
4056
52087a79
LAD
4057 if (!hci_conn_num(hdev, LE_LINK))
4058 return;
4059
4a964404 4060 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
4061 /* LE tx timeout must be longer than maximum
4062 * link supervision timeout (40.9 seconds) */
bae1f5d9 4063 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4064 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4065 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4066 }
4067
4068 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4069 tmp = cnt;
73d80deb 4070 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4071 u32 priority = (skb_peek(&chan->data_q))->priority;
4072 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4073 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4074 skb->len, skb->priority);
6ed58ec5 4075
ec1cce24
LAD
4076 /* Stop if priority has changed */
4077 if (skb->priority < priority)
4078 break;
4079
4080 skb = skb_dequeue(&chan->data_q);
4081
57d17d70 4082 hci_send_frame(hdev, skb);
6ed58ec5
VT
4083 hdev->le_last_tx = jiffies;
4084
4085 cnt--;
73d80deb
LAD
4086 chan->sent++;
4087 chan->conn->sent++;
6ed58ec5
VT
4088 }
4089 }
73d80deb 4090
6ed58ec5
VT
4091 if (hdev->le_pkts)
4092 hdev->le_cnt = cnt;
4093 else
4094 hdev->acl_cnt = cnt;
02b20f0b
LAD
4095
4096 if (cnt != tmp)
4097 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4098}
4099
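/* TX worker: run all schedulers unless the device is claimed by a
 * user channel (raw access), then flush any queued raw packets
 * straight to the driver.
 */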
3eff45ea 4100static void hci_tx_work(struct work_struct *work)
1da177e4 4101{
3eff45ea 4102 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4103 struct sk_buff *skb;
4104
6ed58ec5 4105 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4106 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4107
52de599e
MH
4108 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4109 /* Schedule queues and send stuff to HCI driver */
4110 hci_sched_acl(hdev);
4111 hci_sched_sco(hdev);
4112 hci_sched_esco(hdev);
4113 hci_sched_le(hdev);
4114 }
6ed58ec5 4115
1da177e4
LT
4116 /* Send next queued raw (unknown type) packet */
4117 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4118 hci_send_frame(hdev, skb);
1da177e4
LT
4119}
4120
25985edc 4121/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4122
4123/* ACL data packet */
6039aa73 4124static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4125{
4126 struct hci_acl_hdr *hdr = (void *) skb->data;
4127 struct hci_conn *conn;
4128 __u16 handle, flags;
4129
4130 skb_pull(skb, HCI_ACL_HDR_SIZE);
4131
4132 handle = __le16_to_cpu(hdr->handle);
4133 flags = hci_flags(handle);
4134 handle = hci_handle(handle);
4135
f0e09510 4136 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4137 handle, flags);
1da177e4
LT
4138
4139 hdev->stat.acl_rx++;
4140
4141 hci_dev_lock(hdev);
4142 conn = hci_conn_hash_lookup_handle(hdev, handle);
4143 hci_dev_unlock(hdev);
8e87d142 4144
1da177e4 4145 if (conn) {
65983fc7 4146 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4147
1da177e4 4148 /* Send to upper protocol */
686ebf28
UF
4149 l2cap_recv_acldata(conn, skb, flags);
4150 return;
1da177e4 4151 } else {
8e87d142 4152 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4153 hdev->name, handle);
1da177e4
LT
4154 }
4155
4156 kfree_skb(skb);
4157}
4158
4159/* SCO data packet */
6039aa73 4160static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4161{
4162 struct hci_sco_hdr *hdr = (void *) skb->data;
4163 struct hci_conn *conn;
4164 __u16 handle;
4165
4166 skb_pull(skb, HCI_SCO_HDR_SIZE);
4167
4168 handle = __le16_to_cpu(hdr->handle);
4169
f0e09510 4170 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4171
4172 hdev->stat.sco_rx++;
4173
4174 hci_dev_lock(hdev);
4175 conn = hci_conn_hash_lookup_handle(hdev, handle);
4176 hci_dev_unlock(hdev);
4177
4178 if (conn) {
1da177e4 4179 /* Send to upper protocol */
686ebf28
UF
4180 sco_recv_scodata(conn, skb);
4181 return;
1da177e4 4182 } else {
8e87d142 4183 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4184 hdev->name, handle);
1da177e4
LT
4185 }
4186
4187 kfree_skb(skb);
4188}
4189
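/* A request is complete when the command queue is empty or when its
 * head skb is marked as the start of a new request.
 */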
9238f36a
JH
4190static bool hci_req_is_complete(struct hci_dev *hdev)
4191{
4192 struct sk_buff *skb;
4193
4194 skb = skb_peek(&hdev->cmd_q);
4195 if (!skb)
4196 return true;
4197
4198 return bt_cb(skb)->req.start;
4199}
4200
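/* Re-queue a clone of the last sent command unless it was a reset;
 * used to recover from controllers that emit a spontaneous reset
 * complete event during init (see hci_req_cmd_complete() below).
 */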
42c6b129
JH
4201static void hci_resend_last(struct hci_dev *hdev)
4202{
4203 struct hci_command_hdr *sent;
4204 struct sk_buff *skb;
4205 u16 opcode;
4206
4207 if (!hdev->sent_cmd)
4208 return;
4209
4210 sent = (void *) hdev->sent_cmd->data;
4211 opcode = __le16_to_cpu(sent->opcode);
4212 if (opcode == HCI_OP_RESET)
4213 return;
4214
4215 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4216 if (!skb)
4217 return;
4218
4219 skb_queue_head(&hdev->cmd_q, skb);
4220 queue_work(hdev->workqueue, &hdev->cmd_work);
4221}
4222
9238f36a
JH
4223void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4224{
4225 hci_req_complete_t req_complete = NULL;
4226 struct sk_buff *skb;
4227 unsigned long flags;
4228
4229 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4230
42c6b129
JH
4231 /* If the completed command doesn't match the last one that was
4232 * sent we need to do special handling of it.
9238f36a 4233 */
42c6b129
JH
4234 if (!hci_sent_cmd_data(hdev, opcode)) {
4235 /* Some CSR based controllers generate a spontaneous
4236 * reset complete event during init and any pending
4237 * command will never be completed. In such a case we
4238 * need to resend whatever was the last sent
4239 * command.
4240 */
4241 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4242 hci_resend_last(hdev);
4243
9238f36a 4244 return;
42c6b129 4245 }
9238f36a
JH
4246
4247 /* If the command succeeded and there's still more commands in
4248 * this request the request is not yet complete.
4249 */
4250 if (!status && !hci_req_is_complete(hdev))
4251 return;
4252
4253 /* If this was the last command in a request the complete
4254 * callback would be found in hdev->sent_cmd instead of the
4255 * command queue (hdev->cmd_q).
4256 */
4257 if (hdev->sent_cmd) {
4258 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
4259
4260 if (req_complete) {
4261 /* We must set the complete callback to NULL to
4262 * avoid calling the callback more than once if
4263 * this function gets called again.
4264 */
4265 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4266
9238f36a 4267 goto call_complete;
53e21fbc 4268 }
9238f36a
JH
4269 }
4270
4271 /* Remove all pending commands belonging to this request */
4272 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4273 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4274 if (bt_cb(skb)->req.start) {
4275 __skb_queue_head(&hdev->cmd_q, skb);
4276 break;
4277 }
4278
4279 req_complete = bt_cb(skb)->req.complete;
4280 kfree_skb(skb);
4281 }
4282 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4283
4284call_complete:
4285 if (req_complete)
1904a853 4286 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
9238f36a
JH
4287}
4288
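/* RX worker: each dequeued skb is first mirrored to the monitor
 * socket (and to raw sockets in promiscuous mode), then dropped if a
 * user channel owns the device or if data arrives during init, and
 * otherwise dispatched by packet type to the event/ACL/SCO handlers.
 */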
b78752cc 4289static void hci_rx_work(struct work_struct *work)
1da177e4 4290{
b78752cc 4291 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4292 struct sk_buff *skb;
4293
4294 BT_DBG("%s", hdev->name);
4295
1da177e4 4296 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4297 /* Send copy to monitor */
4298 hci_send_to_monitor(hdev, skb);
4299
1da177e4
LT
4300 if (atomic_read(&hdev->promisc)) {
4301 /* Send copy to the sockets */
470fe1b5 4302 hci_send_to_sock(hdev, skb);
1da177e4
LT
4303 }
4304
fee746b0 4305 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
4306 kfree_skb(skb);
4307 continue;
4308 }
4309
4310 if (test_bit(HCI_INIT, &hdev->flags)) {
4311 /* Don't process data packets in these states. */
0d48d939 4312 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4313 case HCI_ACLDATA_PKT:
4314 case HCI_SCODATA_PKT:
4315 kfree_skb(skb);
4316 continue;
3ff50b79 4317 }
1da177e4
LT
4318 }
4319
4320 /* Process frame */
0d48d939 4321 switch (bt_cb(skb)->pkt_type) {
1da177e4 4322 case HCI_EVENT_PKT:
b78752cc 4323 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4324 hci_event_packet(hdev, skb);
4325 break;
4326
4327 case HCI_ACLDATA_PKT:
4328 BT_DBG("%s ACL data packet", hdev->name);
4329 hci_acldata_packet(hdev, skb);
4330 break;
4331
4332 case HCI_SCODATA_PKT:
4333 BT_DBG("%s SCO data packet", hdev->name);
4334 hci_scodata_packet(hdev, skb);
4335 break;
4336
4337 default:
4338 kfree_skb(skb);
4339 break;
4340 }
4341 }
1da177e4
LT
4342}
4343
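/* CMD worker: when the controller has a free command credit, dequeue
 * the next command, keep a clone in hdev->sent_cmd for matching the
 * completion event, send it, and (re)arm the command timeout unless a
 * reset is in flight; if cloning fails, re-queue and retry.
 */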
c347b765 4344static void hci_cmd_work(struct work_struct *work)
1da177e4 4345{
c347b765 4346 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4347 struct sk_buff *skb;
4348
2104786b
AE
4349 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4350 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4351
1da177e4 4352 /* Send queued commands */
5a08ecce
AE
4353 if (atomic_read(&hdev->cmd_cnt)) {
4354 skb = skb_dequeue(&hdev->cmd_q);
4355 if (!skb)
4356 return;
4357
7585b97a 4358 kfree_skb(hdev->sent_cmd);
1da177e4 4359
a675d7f1 4360 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4361 if (hdev->sent_cmd) {
1da177e4 4362 atomic_dec(&hdev->cmd_cnt);
57d17d70 4363 hci_send_frame(hdev, skb);
7bdb8a5c 4364 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4365 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4366 else
65cc2b49
MH
4367 schedule_delayed_work(&hdev->cmd_timer,
4368 HCI_CMD_TIMEOUT);
1da177e4
LT
4369 } else {
4370 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4371 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4372 }
4373 }
4374}