/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

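/* A synchronous request stays in HCI_REQ_PEND until hci_req_sync_complete()
 * or hci_req_cancel() below moves it to HCI_REQ_DONE or HCI_REQ_CANCELED and
 * wakes the waiter on req_wait_q; any other status observed after the wait
 * means the request timed out.
 */
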
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

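/* Usage note: once debugfs is mounted (conventionally at /sys/kernel/debug),
 * the per-controller file created for this attribute in __hci_init() takes
 * strtobool() style input, e.g. writing "Y" or "1" enables DUT mode while
 * "N" or "0" resets the controller out of it.
 */
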
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

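/* Illustrative sketch (hypothetical caller, compiled out): how a driver
 * would issue a synchronous command with __hci_cmd_sync(). The opcode and
 * reply struct exist in <net/bluetooth/hci.h>; on success the returned skb
 * holds the Command Complete parameters and must be freed by the caller,
 * mirroring the dut_mode_write() caller above.
 */
#if 0
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	rp = (struct hci_rp_read_local_version *)skb->data;
	BT_DBG("%s hci_ver %u hci_rev %u", hdev->name, rp->hci_ver,
	       __le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}
#endif
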
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

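/* The request builders below (hci_reset_req(), hci_scan_req(), ...) follow
 * the shape hci_req_sync() expects: they only queue commands against the
 * passed request with hci_req_add() and never block themselves.
 */
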
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

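/* Layout note: events[] is the little-endian 64-bit event mask, so each
 * byte covers eight mask bits, LSB first: events[n] |= (1 << b) sets spec
 * bit n * 8 + b. For example, events[4] |= 0x02 above is bit 33, the
 * Inquiry Result with RSSI event.
 */
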
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

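/* Usage sketch (hypothetical caller, compiled out): every reference taken
 * via hci_dev_get() must be dropped again with hci_dev_put().
 */
#if 0
static void example_use_hdev(void)
{
	struct hci_dev *hdev = hci_dev_get(0);

	if (hdev) {
		BT_DBG("%s", hdev->name);
		hci_dev_put(hdev);
	}
}
#endif
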
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

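/* The discovery cache threads each entry on up to three lists: every entry
 * lives on cache->all, entries whose remote name is still unknown are also
 * linked on cache->unknown, and entries queued for name resolution sit on
 * cache->resolve, kept sorted strongest-signal-first by
 * hci_inquiry_cache_update_resolve() above.
 */
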
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

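/* Caller-side layout note: the argument is a single user buffer holding a
 * struct hci_inquiry_req followed by room for the result array, i.e.
 * sizeof(struct hci_inquiry_req) + max_rsp * sizeof(struct inquiry_info)
 * bytes, where max_rsp defaults to 255 when ir.num_rsp is 0 (see the
 * copy_to_user() calls above).
 */
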
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	hci_notify(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hci_notify(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

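/* Note: this helper backs the HCIDEVUP ioctl path; mgmt-initiated power on
 * is expected to call hci_dev_do_open() directly from the power_on work
 * instead of coming through here.
 */
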
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	hci_notify(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

123abc08
JH
1794static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1795{
bc6d2d04 1796 bool conn_changed, discov_changed;
123abc08
JH
1797
1798 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1799
1800 if ((scan & SCAN_PAGE))
238be788
MH
1801 conn_changed = !hci_dev_test_and_set_flag(hdev,
1802 HCI_CONNECTABLE);
123abc08 1803 else
a69d8927
MH
1804 conn_changed = hci_dev_test_and_clear_flag(hdev,
1805 HCI_CONNECTABLE);
123abc08 1806
bc6d2d04 1807 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1808 discov_changed = !hci_dev_test_and_set_flag(hdev,
1809 HCI_DISCOVERABLE);
bc6d2d04 1810 } else {
a358dc11 1811 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1812 discov_changed = hci_dev_test_and_clear_flag(hdev,
1813 HCI_DISCOVERABLE);
bc6d2d04
JH
1814 }
1815
d7a5a11d 1816 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1817 return;
1818
bc6d2d04
JH
1819 if (conn_changed || discov_changed) {
1820 /* In case this was disabled through mgmt */
a1536da2 1821 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1822
d7a5a11d 1823 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
bc6d2d04
JH
1824 mgmt_update_adv_data(hdev);
1825
123abc08 1826 mgmt_new_settings(hdev);
bc6d2d04 1827 }
123abc08
JH
1828}
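/* Illustrative sketch (added, not part of the original file): the mapping
 * that hci_update_scan_state() maintains between the classic scan enable
 * bits and the mgmt-visible device flags:
 *
 *	dr.dev_opt = SCAN_PAGE;			sets HCI_CONNECTABLE
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	sets HCI_CONNECTABLE and
 *						HCI_DISCOVERABLE
 *	dr.dev_opt = SCAN_DISABLED;		clears both flags
 *
 * Only when one of the flags actually changed (and HCI_MGMT is set) are
 * new settings events emitted towards management clients.
 */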
1829
1da177e4
LT
1830int hci_dev_cmd(unsigned int cmd, void __user *arg)
1831{
1832 struct hci_dev *hdev;
1833 struct hci_dev_req dr;
1834 int err = 0;
1835
1836 if (copy_from_user(&dr, arg, sizeof(dr)))
1837 return -EFAULT;
1838
70f23020
AE
1839 hdev = hci_dev_get(dr.dev_id);
1840 if (!hdev)
1da177e4
LT
1841 return -ENODEV;
1842
d7a5a11d 1843 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1844 err = -EBUSY;
1845 goto done;
1846 }
1847
d7a5a11d 1848 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1849 err = -EOPNOTSUPP;
1850 goto done;
1851 }
1852
5b69bef5
MH
1853 if (hdev->dev_type != HCI_BREDR) {
1854 err = -EOPNOTSUPP;
1855 goto done;
1856 }
1857
d7a5a11d 1858 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1859 err = -EOPNOTSUPP;
1860 goto done;
1861 }
1862
1da177e4
LT
1863 switch (cmd) {
1864 case HCISETAUTH:
01178cd4
JH
1865 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1866 HCI_INIT_TIMEOUT);
1da177e4
LT
1867 break;
1868
1869 case HCISETENCRYPT:
1870 if (!lmp_encrypt_capable(hdev)) {
1871 err = -EOPNOTSUPP;
1872 break;
1873 }
1874
1875 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1876 /* Auth must be enabled first */
01178cd4
JH
1877 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1878 HCI_INIT_TIMEOUT);
1da177e4
LT
1879 if (err)
1880 break;
1881 }
1882
01178cd4
JH
1883 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1884 HCI_INIT_TIMEOUT);
1da177e4
LT
1885 break;
1886
1887 case HCISETSCAN:
01178cd4
JH
1888 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1889 HCI_INIT_TIMEOUT);
91a668b0 1890
bc6d2d04
JH
1891 /* Ensure that the connectable and discoverable states
1892 * get correctly modified as this was a non-mgmt change.
91a668b0 1893 */
123abc08
JH
1894 if (!err)
1895 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1896 break;
1897
1da177e4 1898 case HCISETLINKPOL:
01178cd4
JH
1899 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1900 HCI_INIT_TIMEOUT);
1da177e4
LT
1901 break;
1902
1903 case HCISETLINKMODE:
e4e8e37c
MH
1904 hdev->link_mode = ((__u16) dr.dev_opt) &
1905 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1906 break;
1907
1908 case HCISETPTYPE:
1909 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1910 break;
1911
1912 case HCISETACLMTU:
e4e8e37c
MH
1913 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1914 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1915 break;
1916
1917 case HCISETSCOMTU:
e4e8e37c
MH
1918 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1919 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1920 break;
1921
1922 default:
1923 err = -EINVAL;
1924 break;
1925 }
e4e8e37c 1926
0736cfa8 1927done:
1da177e4
LT
1928 hci_dev_put(hdev);
1929 return err;
1930}
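/* Clarifying note (added, not part of the original file): for HCISETACLMTU
 * and HCISETSCOMTU the 32-bit dev_opt carries two 16-bit values, which the
 * (__u16 *) &dr.dev_opt + 0 / + 1 accesses above unpack: the packet count
 * is taken from the first 16-bit word in memory and the MTU from the
 * second. On a little-endian host a caller could build it like this
 * (illustrative sketch):
 *
 *	dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
 */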
1931
1932int hci_get_dev_list(void __user *arg)
1933{
8035ded4 1934 struct hci_dev *hdev;
1da177e4
LT
1935 struct hci_dev_list_req *dl;
1936 struct hci_dev_req *dr;
1da177e4
LT
1937 int n = 0, size, err;
1938 __u16 dev_num;
1939
1940 if (get_user(dev_num, (__u16 __user *) arg))
1941 return -EFAULT;
1942
1943 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1944 return -EINVAL;
1945
1946 size = sizeof(*dl) + dev_num * sizeof(*dr);
1947
70f23020
AE
1948 dl = kzalloc(size, GFP_KERNEL);
1949 if (!dl)
1da177e4
LT
1950 return -ENOMEM;
1951
1952 dr = dl->dev_req;
1953
f20d09d5 1954 read_lock(&hci_dev_list_lock);
8035ded4 1955 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 1956 unsigned long flags = hdev->flags;
c542a06c 1957
2e84d8db
MH
 1958 /* When auto-off is configured the transport is running,
 1959 * but in that case still indicate that the device is
 1960 * actually down.
 1961 */
d7a5a11d 1962 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 1963 flags &= ~BIT(HCI_UP);
c542a06c 1964
1da177e4 1965 (dr + n)->dev_id = hdev->id;
2e84d8db 1966 (dr + n)->dev_opt = flags;
c542a06c 1967
1da177e4
LT
1968 if (++n >= dev_num)
1969 break;
1970 }
f20d09d5 1971 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1972
1973 dl->dev_num = n;
1974 size = sizeof(*dl) + n * sizeof(*dr);
1975
1976 err = copy_to_user(arg, dl, size);
1977 kfree(dl);
1978
1979 return err ? -EFAULT : 0;
1980}
1981
1982int hci_get_dev_info(void __user *arg)
1983{
1984 struct hci_dev *hdev;
1985 struct hci_dev_info di;
2e84d8db 1986 unsigned long flags;
1da177e4
LT
1987 int err = 0;
1988
1989 if (copy_from_user(&di, arg, sizeof(di)))
1990 return -EFAULT;
1991
70f23020
AE
1992 hdev = hci_dev_get(di.dev_id);
1993 if (!hdev)
1da177e4
LT
1994 return -ENODEV;
1995
2e84d8db
MH
 1996 /* When auto-off is configured the transport is running,
 1997 * but in that case still indicate that the device is
 1998 * actually down.
 1999 */
d7a5a11d 2000 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
2001 flags = hdev->flags & ~BIT(HCI_UP);
2002 else
2003 flags = hdev->flags;
c542a06c 2004
1da177e4
LT
2005 strcpy(di.name, hdev->name);
2006 di.bdaddr = hdev->bdaddr;
60f2a3ed 2007 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2008 di.flags = flags;
1da177e4 2009 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2010 if (lmp_bredr_capable(hdev)) {
2011 di.acl_mtu = hdev->acl_mtu;
2012 di.acl_pkts = hdev->acl_pkts;
2013 di.sco_mtu = hdev->sco_mtu;
2014 di.sco_pkts = hdev->sco_pkts;
2015 } else {
2016 di.acl_mtu = hdev->le_mtu;
2017 di.acl_pkts = hdev->le_pkts;
2018 di.sco_mtu = 0;
2019 di.sco_pkts = 0;
2020 }
1da177e4
LT
2021 di.link_policy = hdev->link_policy;
2022 di.link_mode = hdev->link_mode;
2023
2024 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2025 memcpy(&di.features, &hdev->features, sizeof(di.features));
2026
2027 if (copy_to_user(arg, &di, sizeof(di)))
2028 err = -EFAULT;
2029
2030 hci_dev_put(hdev);
2031
2032 return err;
2033}
2034
2035/* ---- Interface to HCI drivers ---- */
2036
611b30f7
MH
2037static int hci_rfkill_set_block(void *data, bool blocked)
2038{
2039 struct hci_dev *hdev = data;
2040
2041 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2042
d7a5a11d 2043 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2044 return -EBUSY;
2045
5e130367 2046 if (blocked) {
a1536da2 2047 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2048 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2049 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2050 hci_dev_do_close(hdev);
5e130367 2051 } else {
a358dc11 2052 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2053 }
611b30f7
MH
2054
2055 return 0;
2056}
2057
2058static const struct rfkill_ops hci_rfkill_ops = {
2059 .set_block = hci_rfkill_set_block,
2060};
2061
ab81cbf9
JH
2062static void hci_power_on(struct work_struct *work)
2063{
2064 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2065 int err;
ab81cbf9
JH
2066
2067 BT_DBG("%s", hdev->name);
2068
cbed0ca1 2069 err = hci_dev_do_open(hdev);
96570ffc 2070 if (err < 0) {
3ad67582 2071 hci_dev_lock(hdev);
96570ffc 2072 mgmt_set_powered_failed(hdev, err);
3ad67582 2073 hci_dev_unlock(hdev);
ab81cbf9 2074 return;
96570ffc 2075 }
ab81cbf9 2076
a5c8f270
MH
2077 /* During the HCI setup phase, a few error conditions are
2078 * ignored and they need to be checked now. If they are still
2079 * valid, it is important to turn the device back off.
2080 */
d7a5a11d
MH
2081 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2082 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
a5c8f270
MH
2083 (hdev->dev_type == HCI_BREDR &&
2084 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2085 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2086 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2087 hci_dev_do_close(hdev);
d7a5a11d 2088 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2089 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2090 HCI_AUTO_OFF_TIMEOUT);
bf543036 2091 }
ab81cbf9 2092
a69d8927 2093 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2094 /* For unconfigured devices, set the HCI_RAW flag
2095 * so that userspace can easily identify them.
4a964404 2096 */
d7a5a11d 2097 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2098 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2099
2100 /* For fully configured devices, this will send
2101 * the Index Added event. For unconfigured devices,
 2102 * it will send the Unconfigured Index Added event.
 2103 *
 2104 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
 2105 * and no event will be sent.
2106 */
2107 mgmt_index_added(hdev);
a69d8927 2108 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
2109 /* When the controller is now configured, then it
2110 * is important to clear the HCI_RAW flag.
2111 */
d7a5a11d 2112 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2113 clear_bit(HCI_RAW, &hdev->flags);
2114
d603b76b
MH
2115 /* Powering on the controller with HCI_CONFIG set only
2116 * happens with the transition from unconfigured to
2117 * configured. This will send the Index Added event.
2118 */
744cf19e 2119 mgmt_index_added(hdev);
fee746b0 2120 }
ab81cbf9
JH
2121}
2122
2123static void hci_power_off(struct work_struct *work)
2124{
3243553f 2125 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2126 power_off.work);
ab81cbf9
JH
2127
2128 BT_DBG("%s", hdev->name);
2129
8ee56540 2130 hci_dev_do_close(hdev);
ab81cbf9
JH
2131}
2132
c7741d16
MH
2133static void hci_error_reset(struct work_struct *work)
2134{
2135 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2136
2137 BT_DBG("%s", hdev->name);
2138
2139 if (hdev->hw_error)
2140 hdev->hw_error(hdev, hdev->hw_error_code);
2141 else
2142 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2143 hdev->hw_error_code);
2144
2145 if (hci_dev_do_close(hdev))
2146 return;
2147
c7741d16
MH
2148 hci_dev_do_open(hdev);
2149}
2150
16ab91ab
JH
2151static void hci_discov_off(struct work_struct *work)
2152{
2153 struct hci_dev *hdev;
16ab91ab
JH
2154
2155 hdev = container_of(work, struct hci_dev, discov_off.work);
2156
2157 BT_DBG("%s", hdev->name);
2158
d1967ff8 2159 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2160}
2161
5d900e46
FG
2162static void hci_adv_timeout_expire(struct work_struct *work)
2163{
2164 struct hci_dev *hdev;
2165
2166 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2167
2168 BT_DBG("%s", hdev->name);
2169
2170 mgmt_adv_timeout_expired(hdev);
2171}
2172
35f7498a 2173void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2174{
4821002c 2175 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2176
4821002c
JH
2177 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2178 list_del(&uuid->list);
2aeb9a1a
JH
2179 kfree(uuid);
2180 }
2aeb9a1a
JH
2181}
2182
35f7498a 2183void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2184{
0378b597 2185 struct link_key *key;
55ed8ca1 2186
0378b597
JH
2187 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2188 list_del_rcu(&key->list);
2189 kfree_rcu(key, rcu);
55ed8ca1 2190 }
55ed8ca1
JH
2191}
2192
35f7498a 2193void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2194{
970d0f1b 2195 struct smp_ltk *k;
b899efaf 2196
970d0f1b
JH
2197 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2198 list_del_rcu(&k->list);
2199 kfree_rcu(k, rcu);
b899efaf 2200 }
b899efaf
VCG
2201}
2202
970c4e46
JH
2203void hci_smp_irks_clear(struct hci_dev *hdev)
2204{
adae20cb 2205 struct smp_irk *k;
970c4e46 2206
adae20cb
JH
2207 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2208 list_del_rcu(&k->list);
2209 kfree_rcu(k, rcu);
970c4e46
JH
2210 }
2211}
2212
55ed8ca1
JH
2213struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2214{
8035ded4 2215 struct link_key *k;
55ed8ca1 2216
0378b597
JH
2217 rcu_read_lock();
2218 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2219 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2220 rcu_read_unlock();
55ed8ca1 2221 return k;
0378b597
JH
2222 }
2223 }
2224 rcu_read_unlock();
55ed8ca1
JH
2225
2226 return NULL;
2227}
2228
745c0ce3 2229static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2230 u8 key_type, u8 old_key_type)
d25e28ab
JH
2231{
2232 /* Legacy key */
2233 if (key_type < 0x03)
745c0ce3 2234 return true;
d25e28ab
JH
2235
2236 /* Debug keys are insecure so don't store them persistently */
2237 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2238 return false;
d25e28ab
JH
2239
2240 /* Changed combination key and there's no previous one */
2241 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2242 return false;
d25e28ab
JH
2243
2244 /* Security mode 3 case */
2245 if (!conn)
745c0ce3 2246 return true;
d25e28ab 2247
e3befab9
JH
2248 /* BR/EDR key derived using SC from an LE link */
2249 if (conn->type == LE_LINK)
2250 return true;
2251
d25e28ab
JH
2252 /* Neither local nor remote side had no-bonding as requirement */
2253 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2254 return true;
d25e28ab
JH
2255
2256 /* Local side had dedicated bonding as requirement */
2257 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2258 return true;
d25e28ab
JH
2259
2260 /* Remote side had dedicated bonding as requirement */
2261 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2262 return true;
d25e28ab
JH
2263
2264 /* If none of the above criteria match, then don't store the key
2265 * persistently */
745c0ce3 2266 return false;
d25e28ab
JH
2267}
2268
e804d25d 2269static u8 ltk_role(u8 type)
98a0b845 2270{
e804d25d
JH
2271 if (type == SMP_LTK)
2272 return HCI_ROLE_MASTER;
98a0b845 2273
e804d25d 2274 return HCI_ROLE_SLAVE;
98a0b845
JH
2275}
2276
f3a73d97
JH
2277struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2278 u8 addr_type, u8 role)
75d262c2 2279{
c9839a11 2280 struct smp_ltk *k;
75d262c2 2281
970d0f1b
JH
2282 rcu_read_lock();
2283 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2284 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2285 continue;
2286
923e2414 2287 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2288 rcu_read_unlock();
75d262c2 2289 return k;
970d0f1b
JH
2290 }
2291 }
2292 rcu_read_unlock();
75d262c2
VCG
2293
2294 return NULL;
2295}
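/* Illustrative sketch (added, not part of the original file): resolving
 * the LTK used to encrypt an outgoing (master-side) LE connection; conn is
 * assumed to be an established LE link.
 *
 *	struct smp_ltk *ltk;
 *
 *	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type,
 *			   HCI_ROLE_MASTER);
 *	if (!ltk)
 *		return -ENOENT;
 *
 * An LE Secure Connections key matches either role, which is why the
 * lookup above accepts smp_ltk_is_sc() entries regardless of ltk_role().
 */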
75d262c2 2296
970c4e46
JH
2297struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2298{
2299 struct smp_irk *irk;
2300
adae20cb
JH
2301 rcu_read_lock();
2302 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2303 if (!bacmp(&irk->rpa, rpa)) {
2304 rcu_read_unlock();
970c4e46 2305 return irk;
adae20cb 2306 }
970c4e46
JH
2307 }
2308
adae20cb 2309 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2310 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2311 bacpy(&irk->rpa, rpa);
adae20cb 2312 rcu_read_unlock();
970c4e46
JH
2313 return irk;
2314 }
2315 }
adae20cb 2316 rcu_read_unlock();
970c4e46
JH
2317
2318 return NULL;
2319}
2320
2321struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2322 u8 addr_type)
2323{
2324 struct smp_irk *irk;
2325
6cfc9988
JH
2326 /* Identity Address must be public or static random */
2327 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2328 return NULL;
2329
adae20cb
JH
2330 rcu_read_lock();
2331 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2332 if (addr_type == irk->addr_type &&
adae20cb
JH
2333 bacmp(bdaddr, &irk->bdaddr) == 0) {
2334 rcu_read_unlock();
970c4e46 2335 return irk;
adae20cb 2336 }
970c4e46 2337 }
adae20cb 2338 rcu_read_unlock();
970c4e46
JH
2339
2340 return NULL;
2341}
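/* Clarifying note (added, not part of the original file): a static random
 * address is identified by its two most significant bits being 11b, so the
 * (bdaddr->b[5] & 0xc0) != 0xc0 test above rejects resolvable and
 * non-resolvable private addresses, which must not be used as identity
 * addresses.
 */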
2342
567fa2aa 2343struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2344 bdaddr_t *bdaddr, u8 *val, u8 type,
2345 u8 pin_len, bool *persistent)
55ed8ca1
JH
2346{
2347 struct link_key *key, *old_key;
745c0ce3 2348 u8 old_key_type;
55ed8ca1
JH
2349
2350 old_key = hci_find_link_key(hdev, bdaddr);
2351 if (old_key) {
2352 old_key_type = old_key->type;
2353 key = old_key;
2354 } else {
12adcf3a 2355 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2356 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2357 if (!key)
567fa2aa 2358 return NULL;
0378b597 2359 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2360 }
2361
6ed93dc6 2362 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2363
d25e28ab
JH
2364 /* Some buggy controller combinations generate a changed
2365 * combination key for legacy pairing even when there's no
2366 * previous key */
2367 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2368 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2369 type = HCI_LK_COMBINATION;
655fe6ec
JH
2370 if (conn)
2371 conn->key_type = type;
2372 }
d25e28ab 2373
55ed8ca1 2374 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2375 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2376 key->pin_len = pin_len;
2377
b6020ba0 2378 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2379 key->type = old_key_type;
4748fed2
JH
2380 else
2381 key->type = type;
2382
7652ff6a
JH
2383 if (persistent)
2384 *persistent = hci_persistent_key(hdev, conn, type,
2385 old_key_type);
4df378a1 2386
567fa2aa 2387 return key;
55ed8ca1
JH
2388}
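/* Illustrative sketch (added, not part of the original file): a typical
 * caller stores the key and then uses the persistence verdict to decide
 * whether userspace should keep it across reboots; val, type and pin_len
 * are assumed to come from the Link Key Notification event.
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &conn->dst, val, type,
 *			       pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */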
2389
ca9142b8 2390struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2391 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2392 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2393{
c9839a11 2394 struct smp_ltk *key, *old_key;
e804d25d 2395 u8 role = ltk_role(type);
75d262c2 2396
f3a73d97 2397 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2398 if (old_key)
75d262c2 2399 key = old_key;
c9839a11 2400 else {
0a14ab41 2401 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2402 if (!key)
ca9142b8 2403 return NULL;
970d0f1b 2404 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2405 }
2406
75d262c2 2407 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2408 key->bdaddr_type = addr_type;
2409 memcpy(key->val, tk, sizeof(key->val));
2410 key->authenticated = authenticated;
2411 key->ediv = ediv;
fe39c7b2 2412 key->rand = rand;
c9839a11
VCG
2413 key->enc_size = enc_size;
2414 key->type = type;
75d262c2 2415
ca9142b8 2416 return key;
75d262c2
VCG
2417}
2418
ca9142b8
JH
2419struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2420 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2421{
2422 struct smp_irk *irk;
2423
2424 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2425 if (!irk) {
2426 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2427 if (!irk)
ca9142b8 2428 return NULL;
970c4e46
JH
2429
2430 bacpy(&irk->bdaddr, bdaddr);
2431 irk->addr_type = addr_type;
2432
adae20cb 2433 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2434 }
2435
2436 memcpy(irk->val, val, 16);
2437 bacpy(&irk->rpa, rpa);
2438
ca9142b8 2439 return irk;
970c4e46
JH
2440}
2441
55ed8ca1
JH
2442int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2443{
2444 struct link_key *key;
2445
2446 key = hci_find_link_key(hdev, bdaddr);
2447 if (!key)
2448 return -ENOENT;
2449
6ed93dc6 2450 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2451
0378b597
JH
2452 list_del_rcu(&key->list);
2453 kfree_rcu(key, rcu);
55ed8ca1
JH
2454
2455 return 0;
2456}
2457
e0b2b27e 2458int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2459{
970d0f1b 2460 struct smp_ltk *k;
c51ffa0b 2461 int removed = 0;
b899efaf 2462
970d0f1b 2463 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2464 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2465 continue;
2466
6ed93dc6 2467 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2468
970d0f1b
JH
2469 list_del_rcu(&k->list);
2470 kfree_rcu(k, rcu);
c51ffa0b 2471 removed++;
b899efaf
VCG
2472 }
2473
c51ffa0b 2474 return removed ? 0 : -ENOENT;
b899efaf
VCG
2475}
2476
a7ec7338
JH
2477void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2478{
adae20cb 2479 struct smp_irk *k;
a7ec7338 2480
adae20cb 2481 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2482 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2483 continue;
2484
2485 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2486
adae20cb
JH
2487 list_del_rcu(&k->list);
2488 kfree_rcu(k, rcu);
a7ec7338
JH
2489 }
2490}
2491
55e76b38
JH
2492bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2493{
2494 struct smp_ltk *k;
4ba9faf3 2495 struct smp_irk *irk;
55e76b38
JH
2496 u8 addr_type;
2497
2498 if (type == BDADDR_BREDR) {
2499 if (hci_find_link_key(hdev, bdaddr))
2500 return true;
2501 return false;
2502 }
2503
2504 /* Convert to HCI addr type which struct smp_ltk uses */
2505 if (type == BDADDR_LE_PUBLIC)
2506 addr_type = ADDR_LE_DEV_PUBLIC;
2507 else
2508 addr_type = ADDR_LE_DEV_RANDOM;
2509
4ba9faf3
JH
2510 irk = hci_get_irk(hdev, bdaddr, addr_type);
2511 if (irk) {
2512 bdaddr = &irk->bdaddr;
2513 addr_type = irk->addr_type;
2514 }
2515
55e76b38
JH
2516 rcu_read_lock();
2517 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2518 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2519 rcu_read_unlock();
55e76b38 2520 return true;
87c8b28d 2521 }
55e76b38
JH
2522 }
2523 rcu_read_unlock();
2524
2525 return false;
2526}
2527
6bd32326 2528/* HCI command timer function */
65cc2b49 2529static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2530{
65cc2b49
MH
2531 struct hci_dev *hdev = container_of(work, struct hci_dev,
2532 cmd_timer.work);
6bd32326 2533
bda4f23a
AE
2534 if (hdev->sent_cmd) {
2535 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2536 u16 opcode = __le16_to_cpu(sent->opcode);
2537
2538 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2539 } else {
2540 BT_ERR("%s command tx timeout", hdev->name);
2541 }
2542
6bd32326 2543 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2544 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2545}
2546
2763eda6 2547struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2548 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2549{
2550 struct oob_data *data;
2551
6928a924
JH
2552 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2553 if (bacmp(bdaddr, &data->bdaddr) != 0)
2554 continue;
2555 if (data->bdaddr_type != bdaddr_type)
2556 continue;
2557 return data;
2558 }
2763eda6
SJ
2559
2560 return NULL;
2561}
2562
6928a924
JH
2563int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2564 u8 bdaddr_type)
2763eda6
SJ
2565{
2566 struct oob_data *data;
2567
6928a924 2568 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2569 if (!data)
2570 return -ENOENT;
2571
6928a924 2572 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2573
2574 list_del(&data->list);
2575 kfree(data);
2576
2577 return 0;
2578}
2579
35f7498a 2580void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2581{
2582 struct oob_data *data, *n;
2583
2584 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2585 list_del(&data->list);
2586 kfree(data);
2587 }
2763eda6
SJ
2588}
2589
0798872e 2590int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2591 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2592 u8 *hash256, u8 *rand256)
2763eda6
SJ
2593{
2594 struct oob_data *data;
2595
6928a924 2596 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2597 if (!data) {
0a14ab41 2598 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2599 if (!data)
2600 return -ENOMEM;
2601
2602 bacpy(&data->bdaddr, bdaddr);
6928a924 2603 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2604 list_add(&data->list, &hdev->remote_oob_data);
2605 }
2606
81328d5c
JH
2607 if (hash192 && rand192) {
2608 memcpy(data->hash192, hash192, sizeof(data->hash192));
2609 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2610 if (hash256 && rand256)
2611 data->present = 0x03;
81328d5c
JH
2612 } else {
2613 memset(data->hash192, 0, sizeof(data->hash192));
2614 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2615 if (hash256 && rand256)
2616 data->present = 0x02;
2617 else
2618 data->present = 0x00;
0798872e
MH
2619 }
2620
81328d5c
JH
2621 if (hash256 && rand256) {
2622 memcpy(data->hash256, hash256, sizeof(data->hash256));
2623 memcpy(data->rand256, rand256, sizeof(data->rand256));
2624 } else {
2625 memset(data->hash256, 0, sizeof(data->hash256));
2626 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2627 if (hash192 && rand192)
2628 data->present = 0x01;
81328d5c 2629 }
0798872e 2630
6ed93dc6 2631 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2632
2633 return 0;
2634}
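/* Clarifying note (added, not part of the original file): data->present is
 * a bitmask describing which OOB values are stored, matching the way the
 * code above sets it:
 *
 *	0x00	neither P-192 nor P-256 data
 *	0x01	P-192 (hash192/rand192) only
 *	0x02	P-256 (hash256/rand256) only
 *	0x03	both P-192 and P-256 data
 */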
2635
d2609b34
FG
2636/* This function requires the caller holds hdev->lock */
2637struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2638{
2639 struct adv_info *adv_instance;
2640
2641 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2642 if (adv_instance->instance == instance)
2643 return adv_instance;
2644 }
2645
2646 return NULL;
2647}
2648
2649/* This function requires the caller holds hdev->lock */
2650struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2651 struct adv_info *cur_instance;
2652
2653 cur_instance = hci_find_adv_instance(hdev, instance);
2654 if (!cur_instance)
2655 return NULL;
2656
2657 if (cur_instance == list_last_entry(&hdev->adv_instances,
2658 struct adv_info, list))
2659 return list_first_entry(&hdev->adv_instances,
2660 struct adv_info, list);
2661 else
2662 return list_next_entry(cur_instance, list);
2663}
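/* Illustrative sketch (added, not part of the original file): because
 * hci_get_next_instance() wraps from the last entry back to the first, it
 * can drive a simple round-robin rotation of advertising instances;
 * schedule_adv_instance() is a hypothetical helper here.
 *
 *	struct adv_info *next;
 *
 *	next = hci_get_next_instance(hdev, hdev->cur_adv_instance);
 *	if (next && next->instance != hdev->cur_adv_instance)
 *		schedule_adv_instance(next->instance);
 */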
2664
2665/* This function requires the caller holds hdev->lock */
2666int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2667{
2668 struct adv_info *adv_instance;
2669
2670 adv_instance = hci_find_adv_instance(hdev, instance);
2671 if (!adv_instance)
2672 return -ENOENT;
2673
2674 BT_DBG("%s removing %dMR", hdev->name, instance);
2675
5d900e46
FG
2676 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2677 cancel_delayed_work(&hdev->adv_instance_expire);
2678 hdev->adv_instance_timeout = 0;
2679 }
2680
d2609b34
FG
2681 list_del(&adv_instance->list);
2682 kfree(adv_instance);
2683
2684 hdev->adv_instance_cnt--;
2685
2686 return 0;
2687}
2688
2689/* This function requires the caller holds hdev->lock */
2690void hci_adv_instances_clear(struct hci_dev *hdev)
2691{
2692 struct adv_info *adv_instance, *n;
2693
5d900e46
FG
2694 if (hdev->adv_instance_timeout) {
2695 cancel_delayed_work(&hdev->adv_instance_expire);
2696 hdev->adv_instance_timeout = 0;
2697 }
2698
d2609b34
FG
2699 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2700 list_del(&adv_instance->list);
2701 kfree(adv_instance);
2702 }
2703
2704 hdev->adv_instance_cnt = 0;
2705}
2706
2707/* This function requires the caller holds hdev->lock */
2708int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2709 u16 adv_data_len, u8 *adv_data,
2710 u16 scan_rsp_len, u8 *scan_rsp_data,
2711 u16 timeout, u16 duration)
2712{
2713 struct adv_info *adv_instance;
2714
2715 adv_instance = hci_find_adv_instance(hdev, instance);
2716 if (adv_instance) {
2717 memset(adv_instance->adv_data, 0,
2718 sizeof(adv_instance->adv_data));
2719 memset(adv_instance->scan_rsp_data, 0,
2720 sizeof(adv_instance->scan_rsp_data));
2721 } else {
2722 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2723 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2724 return -EOVERFLOW;
2725
39ecfad6 2726 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2727 if (!adv_instance)
2728 return -ENOMEM;
2729
fffd38bc 2730 adv_instance->pending = true;
d2609b34
FG
2731 adv_instance->instance = instance;
2732 list_add(&adv_instance->list, &hdev->adv_instances);
2733 hdev->adv_instance_cnt++;
2734 }
2735
2736 adv_instance->flags = flags;
2737 adv_instance->adv_data_len = adv_data_len;
2738 adv_instance->scan_rsp_len = scan_rsp_len;
2739
2740 if (adv_data_len)
2741 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2742
2743 if (scan_rsp_len)
2744 memcpy(adv_instance->scan_rsp_data,
2745 scan_rsp_data, scan_rsp_len);
2746
2747 adv_instance->timeout = timeout;
5d900e46 2748 adv_instance->remaining_time = timeout;
d2609b34
FG
2749
2750 if (duration == 0)
2751 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2752 else
2753 adv_instance->duration = duration;
2754
2755 BT_DBG("%s for %dMR", hdev->name, instance);
2756
2757 return 0;
2758}
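/* Illustrative sketch (added, not part of the original file): registering
 * advertising instance 0x02 with no scan response data, a 30 second
 * timeout and duration 0, where duration == 0 selects
 * HCI_DEFAULT_ADV_DURATION as handled above; adv_data[] and flags are
 * assumed to come from the caller.
 *
 *	err = hci_add_adv_instance(hdev, 0x02, flags,
 *				   sizeof(adv_data), adv_data,
 *				   0, NULL, 30, 0);
 *	if (err)
 *		return err;
 */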
2759
dcc36c16 2760struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2761 bdaddr_t *bdaddr, u8 type)
b2a66aad 2762{
8035ded4 2763 struct bdaddr_list *b;
b2a66aad 2764
dcc36c16 2765 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2766 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2767 return b;
b9ee0a78 2768 }
b2a66aad
AJ
2769
2770 return NULL;
2771}
2772
dcc36c16 2773void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
2774{
2775 struct list_head *p, *n;
2776
dcc36c16 2777 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 2778 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2779
2780 list_del(p);
2781 kfree(b);
2782 }
b2a66aad
AJ
2783}
2784
dcc36c16 2785int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2786{
2787 struct bdaddr_list *entry;
b2a66aad 2788
b9ee0a78 2789 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2790 return -EBADF;
2791
dcc36c16 2792 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2793 return -EEXIST;
b2a66aad 2794
27f70f3e 2795 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2796 if (!entry)
2797 return -ENOMEM;
b2a66aad
AJ
2798
2799 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2800 entry->bdaddr_type = type;
b2a66aad 2801
dcc36c16 2802 list_add(&entry->list, list);
b2a66aad 2803
2a8357f2 2804 return 0;
b2a66aad
AJ
2805}
2806
dcc36c16 2807int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2808{
2809 struct bdaddr_list *entry;
b2a66aad 2810
35f7498a 2811 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2812 hci_bdaddr_list_clear(list);
35f7498a
JH
2813 return 0;
2814 }
b2a66aad 2815
dcc36c16 2816 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2817 if (!entry)
2818 return -ENOENT;
2819
2820 list_del(&entry->list);
2821 kfree(entry);
2822
2823 return 0;
2824}
2825
15819a70
AG
2826/* This function requires the caller holds hdev->lock */
2827struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2828 bdaddr_t *addr, u8 addr_type)
2829{
2830 struct hci_conn_params *params;
2831
2832 list_for_each_entry(params, &hdev->le_conn_params, list) {
2833 if (bacmp(&params->addr, addr) == 0 &&
2834 params->addr_type == addr_type) {
2835 return params;
2836 }
2837 }
2838
2839 return NULL;
2840}
2841
4b10966f 2842/* This function requires the caller holds hdev->lock */
501f8827
JH
2843struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2844 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2845{
912b42ef 2846 struct hci_conn_params *param;
a9b0a04c 2847
501f8827 2848 list_for_each_entry(param, list, action) {
912b42ef
JH
2849 if (bacmp(&param->addr, addr) == 0 &&
2850 param->addr_type == addr_type)
2851 return param;
4b10966f
MH
2852 }
2853
2854 return NULL;
a9b0a04c
AG
2855}
2856
f75113a2
JP
2857/* This function requires the caller holds hdev->lock */
2858struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2859 bdaddr_t *addr,
2860 u8 addr_type)
2861{
2862 struct hci_conn_params *param;
2863
2864 list_for_each_entry(param, &hdev->pend_le_conns, action) {
2865 if (bacmp(&param->addr, addr) == 0 &&
2866 param->addr_type == addr_type &&
2867 param->explicit_connect)
2868 return param;
2869 }
2870
2871 list_for_each_entry(param, &hdev->pend_le_reports, action) {
2872 if (bacmp(&param->addr, addr) == 0 &&
2873 param->addr_type == addr_type &&
2874 param->explicit_connect)
2875 return param;
2876 }
2877
2878 return NULL;
2879}
2880
15819a70 2881/* This function requires the caller holds hdev->lock */
51d167c0
MH
2882struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2883 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2884{
2885 struct hci_conn_params *params;
2886
2887 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2888 if (params)
51d167c0 2889 return params;
15819a70
AG
2890
2891 params = kzalloc(sizeof(*params), GFP_KERNEL);
2892 if (!params) {
2893 BT_ERR("Out of memory");
51d167c0 2894 return NULL;
15819a70
AG
2895 }
2896
2897 bacpy(&params->addr, addr);
2898 params->addr_type = addr_type;
cef952ce
AG
2899
2900 list_add(&params->list, &hdev->le_conn_params);
93450c75 2901 INIT_LIST_HEAD(&params->action);
cef952ce 2902
bf5b3c8b
MH
2903 params->conn_min_interval = hdev->le_conn_min_interval;
2904 params->conn_max_interval = hdev->le_conn_max_interval;
2905 params->conn_latency = hdev->le_conn_latency;
2906 params->supervision_timeout = hdev->le_supv_timeout;
2907 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2908
2909 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2910
51d167c0 2911 return params;
bf5b3c8b
MH
2912}
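/* Illustrative sketch (added, not part of the original file): enabling
 * auto-connect for a bonded device. hci_conn_params_add() seeds the entry
 * with the hdev->le_conn_* defaults, so only the policy needs to be
 * changed afterwards; bdaddr is an assumed caller-provided address.
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */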
2913
f6c63249 2914static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2915{
f8aaf9b6 2916 if (params->conn) {
f161dd41 2917 hci_conn_drop(params->conn);
f8aaf9b6
JH
2918 hci_conn_put(params->conn);
2919 }
f161dd41 2920
95305baa 2921 list_del(&params->action);
15819a70
AG
2922 list_del(&params->list);
2923 kfree(params);
f6c63249
JH
2924}
2925
2926/* This function requires the caller holds hdev->lock */
2927void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2928{
2929 struct hci_conn_params *params;
2930
2931 params = hci_conn_params_lookup(hdev, addr, addr_type);
2932 if (!params)
2933 return;
2934
2935 hci_conn_params_free(params);
15819a70 2936
95305baa
JH
2937 hci_update_background_scan(hdev);
2938
15819a70
AG
2939 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2940}
2941
2942/* This function requires the caller holds hdev->lock */
55af49a8 2943void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2944{
2945 struct hci_conn_params *params, *tmp;
2946
2947 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2948 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2949 continue;
f75113a2
JP
2950
 2951 /* If trying to establish a one-time connection to a disabled
 2952 * device, leave the params but mark them as explicit-connect only.
 2953 */
2954 if (params->explicit_connect) {
2955 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2956 continue;
2957 }
2958
15819a70
AG
2959 list_del(&params->list);
2960 kfree(params);
2961 }
2962
55af49a8 2963 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2964}
2965
2966/* This function requires the caller holds hdev->lock */
373110c5 2967void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2968{
15819a70 2969 struct hci_conn_params *params, *tmp;
77a77a30 2970
f6c63249
JH
2971 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2972 hci_conn_params_free(params);
77a77a30 2973
a4790dbd 2974 hci_update_background_scan(hdev);
77a77a30 2975
15819a70 2976 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2977}
2978
1904a853 2979static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7ba8b4be 2980{
4c87eaab
AG
2981 if (status) {
2982 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2983
4c87eaab
AG
2984 hci_dev_lock(hdev);
2985 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2986 hci_dev_unlock(hdev);
2987 return;
2988 }
7ba8b4be
AG
2989}
2990
1904a853
MH
2991static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2992 u16 opcode)
7ba8b4be 2993{
4c87eaab
AG
2994 /* General inquiry access code (GIAC) */
2995 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4c87eaab 2996 struct hci_cp_inquiry cp;
7ba8b4be
AG
2997 int err;
2998
4c87eaab
AG
2999 if (status) {
3000 BT_ERR("Failed to disable LE scanning: status %d", status);
3001 return;
3002 }
7ba8b4be 3003
2d28cfe7
JP
3004 hdev->discovery.scan_start = 0;
3005
4c87eaab
AG
3006 switch (hdev->discovery.type) {
3007 case DISCOV_TYPE_LE:
3008 hci_dev_lock(hdev);
3009 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3010 hci_dev_unlock(hdev);
3011 break;
7ba8b4be 3012
4c87eaab 3013 case DISCOV_TYPE_INTERLEAVED:
4c87eaab 3014 hci_dev_lock(hdev);
7dbfac1d 3015
07d2334a
JP
3016 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3017 &hdev->quirks)) {
 3018 /* If we were running an LE-only scan, change the discovery
 3019 * state. If we were running both LE scan and BR/EDR inquiry
 3020 * simultaneously, and the BR/EDR inquiry has already
 3021 * finished, stop discovery; otherwise the BR/EDR inquiry
 177d0506
 WK
 3022 * will stop discovery when it finishes. If we are resolving
 3023 * a remote device name, do not change the discovery state.
 07d2334a 3024 */
177d0506
WK
3025 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3026 hdev->discovery.state != DISCOVERY_RESOLVING)
07d2334a
JP
3027 hci_discovery_set_state(hdev,
3028 DISCOVERY_STOPPED);
3029 } else {
baf880a9
JH
3030 struct hci_request req;
3031
07d2334a
JP
3032 hci_inquiry_cache_flush(hdev);
3033
baf880a9
JH
3034 hci_req_init(&req, hdev);
3035
3036 memset(&cp, 0, sizeof(cp));
3037 memcpy(&cp.lap, lap, sizeof(cp.lap));
3038 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3039 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3040
07d2334a
JP
3041 err = hci_req_run(&req, inquiry_complete);
3042 if (err) {
3043 BT_ERR("Inquiry request failed: err %d", err);
3044 hci_discovery_set_state(hdev,
3045 DISCOVERY_STOPPED);
3046 }
4c87eaab 3047 }
7dbfac1d 3048
4c87eaab
AG
3049 hci_dev_unlock(hdev);
3050 break;
7dbfac1d 3051 }
7dbfac1d
AG
3052}
3053
7ba8b4be
AG
3054static void le_scan_disable_work(struct work_struct *work)
3055{
3056 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3057 le_scan_disable.work);
4c87eaab
AG
3058 struct hci_request req;
3059 int err;
7ba8b4be
AG
3060
3061 BT_DBG("%s", hdev->name);
3062
2d28cfe7
JP
3063 cancel_delayed_work_sync(&hdev->le_scan_restart);
3064
4c87eaab 3065 hci_req_init(&req, hdev);
28b75a89 3066
b1efcc28 3067 hci_req_add_le_scan_disable(&req);
28b75a89 3068
4c87eaab
AG
3069 err = hci_req_run(&req, le_scan_disable_work_complete);
3070 if (err)
3071 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3072}
3073
2d28cfe7
JP
3074static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3075 u16 opcode)
3076{
3077 unsigned long timeout, duration, scan_start, now;
3078
3079 BT_DBG("%s", hdev->name);
3080
3081 if (status) {
3082 BT_ERR("Failed to restart LE scan: status %d", status);
3083 return;
3084 }
3085
3086 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3087 !hdev->discovery.scan_start)
3088 return;
3089
 3090 /* When the scan was started, hdev->le_scan_disable was queued
 3091 * to run at scan_start + duration. During a scan restart this job
 3092 * has been canceled, so we need to queue it again after the proper
 3093 * timeout to make sure the scan does not run indefinitely.
3094 */
3095 duration = hdev->discovery.scan_duration;
3096 scan_start = hdev->discovery.scan_start;
3097 now = jiffies;
3098 if (now - scan_start <= duration) {
3099 int elapsed;
3100
3101 if (now >= scan_start)
3102 elapsed = now - scan_start;
3103 else
3104 elapsed = ULONG_MAX - scan_start + now;
3105
3106 timeout = duration - elapsed;
3107 } else {
3108 timeout = 0;
3109 }
3110 queue_delayed_work(hdev->workqueue,
3111 &hdev->le_scan_disable, timeout);
3112}
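/* Worked example (added, not part of the original file): with HZ == 1000,
 * a hypothetical scan_duration of 10240 jiffies that is restarted 4000
 * jiffies after scan_start leaves timeout = 10240 - 4000 = 6240 jiffies.
 * The elapsed = ULONG_MAX - scan_start + now branch keeps the arithmetic
 * correct even when the jiffies counter has wrapped between scan_start
 * and now.
 */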
3113
3114static void le_scan_restart_work(struct work_struct *work)
3115{
3116 struct hci_dev *hdev = container_of(work, struct hci_dev,
3117 le_scan_restart.work);
3118 struct hci_request req;
3119 struct hci_cp_le_set_scan_enable cp;
3120 int err;
3121
3122 BT_DBG("%s", hdev->name);
3123
 3124 /* If the controller is not scanning, we are done. */
d7a5a11d 3125 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2d28cfe7
JP
3126 return;
3127
3128 hci_req_init(&req, hdev);
3129
3130 hci_req_add_le_scan_disable(&req);
3131
3132 memset(&cp, 0, sizeof(cp));
3133 cp.enable = LE_SCAN_ENABLE;
3134 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3135 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3136
3137 err = hci_req_run(&req, le_scan_restart_work_complete);
3138 if (err)
3139 BT_ERR("Restart LE scan request failed: err %d", err);
3140}
3141
a1f4c318
JH
3142/* Copy the Identity Address of the controller.
3143 *
3144 * If the controller has a public BD_ADDR, then by default use that one.
3145 * If this is a LE only controller without a public address, default to
3146 * the static random address.
3147 *
3148 * For debugging purposes it is possible to force controllers with a
3149 * public address to use the static random address instead.
50b5b952
MH
3150 *
3151 * In case BR/EDR has been disabled on a dual-mode controller and
3152 * userspace has configured a static address, then that address
3153 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3154 */
3155void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3156 u8 *bdaddr_type)
3157{
b7cb93e5 3158 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3159 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3160 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3161 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3162 bacpy(bdaddr, &hdev->static_addr);
3163 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3164 } else {
3165 bacpy(bdaddr, &hdev->bdaddr);
3166 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3167 }
3168}
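/* Clarifying summary (added, not part of the original file) of the
 * selection above:
 *
 *	HCI_FORCE_STATIC_ADDR set		-> static random address
 *	no public BD_ADDR (LE-only)		-> static random address
 *	BR/EDR disabled + static addr set	-> static random address
 *	otherwise				-> public BD_ADDR
 */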
3169
9be0dab7
DH
3170/* Alloc HCI device */
3171struct hci_dev *hci_alloc_dev(void)
3172{
3173 struct hci_dev *hdev;
3174
27f70f3e 3175 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3176 if (!hdev)
3177 return NULL;
3178
b1b813d4
DH
3179 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3180 hdev->esco_type = (ESCO_HV1);
3181 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3182 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3183 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3184 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3185 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3186 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3187 hdev->adv_instance_cnt = 0;
3188 hdev->cur_adv_instance = 0x00;
5d900e46 3189 hdev->adv_instance_timeout = 0;
b1b813d4 3190
b1b813d4
DH
3191 hdev->sniff_max_interval = 800;
3192 hdev->sniff_min_interval = 80;
3193
3f959d46 3194 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3195 hdev->le_adv_min_interval = 0x0800;
3196 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3197 hdev->le_scan_interval = 0x0060;
3198 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3199 hdev->le_conn_min_interval = 0x0028;
3200 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3201 hdev->le_conn_latency = 0x0000;
3202 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3203 hdev->le_def_tx_len = 0x001b;
3204 hdev->le_def_tx_time = 0x0148;
3205 hdev->le_max_tx_len = 0x001b;
3206 hdev->le_max_tx_time = 0x0148;
3207 hdev->le_max_rx_len = 0x001b;
3208 hdev->le_max_rx_time = 0x0148;
bef64738 3209
d6bfd59c 3210 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3211 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3212 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3213 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3214
b1b813d4
DH
3215 mutex_init(&hdev->lock);
3216 mutex_init(&hdev->req_lock);
3217
3218 INIT_LIST_HEAD(&hdev->mgmt_pending);
3219 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3220 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3221 INIT_LIST_HEAD(&hdev->uuids);
3222 INIT_LIST_HEAD(&hdev->link_keys);
3223 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3224 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3225 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3226 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3227 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3228 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3229 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3230 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3231 INIT_LIST_HEAD(&hdev->adv_instances);
b1b813d4
DH
3232
3233 INIT_WORK(&hdev->rx_work, hci_rx_work);
3234 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3235 INIT_WORK(&hdev->tx_work, hci_tx_work);
3236 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3237 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 3238
b1b813d4
DH
3239 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3240 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3241 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2d28cfe7 3242 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
5d900e46 3243 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
b1b813d4 3244
b1b813d4
DH
3245 skb_queue_head_init(&hdev->rx_q);
3246 skb_queue_head_init(&hdev->cmd_q);
3247 skb_queue_head_init(&hdev->raw_q);
3248
3249 init_waitqueue_head(&hdev->req_wait_q);
3250
65cc2b49 3251 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3252
b1b813d4
DH
3253 hci_init_sysfs(hdev);
3254 discovery_init(hdev);
9be0dab7
DH
3255
3256 return hdev;
3257}
3258EXPORT_SYMBOL(hci_alloc_dev);
3259
3260/* Free HCI device */
3261void hci_free_dev(struct hci_dev *hdev)
3262{
9be0dab7
DH
3263 /* will free via device release */
3264 put_device(&hdev->dev);
3265}
3266EXPORT_SYMBOL(hci_free_dev);
3267
1da177e4
LT
3268/* Register HCI device */
3269int hci_register_dev(struct hci_dev *hdev)
3270{
b1b813d4 3271 int id, error;
1da177e4 3272
74292d5a 3273 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3274 return -EINVAL;
3275
08add513
MM
3276 /* Do not allow HCI_AMP devices to register at index 0,
3277 * so the index can be used as the AMP controller ID.
3278 */
3df92b31
SL
3279 switch (hdev->dev_type) {
3280 case HCI_BREDR:
3281 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3282 break;
3283 case HCI_AMP:
3284 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3285 break;
3286 default:
3287 return -EINVAL;
1da177e4 3288 }
8e87d142 3289
3df92b31
SL
3290 if (id < 0)
3291 return id;
3292
1da177e4
LT
3293 sprintf(hdev->name, "hci%d", id);
3294 hdev->id = id;
2d8b3a11
AE
3295
3296 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3297
d8537548
KC
3298 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3299 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3300 if (!hdev->workqueue) {
3301 error = -ENOMEM;
3302 goto err;
3303 }
f48fd9c8 3304
d8537548
KC
3305 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3306 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3307 if (!hdev->req_workqueue) {
3308 destroy_workqueue(hdev->workqueue);
3309 error = -ENOMEM;
3310 goto err;
3311 }
3312
0153e2ec
MH
3313 if (!IS_ERR_OR_NULL(bt_debugfs))
3314 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3315
bdc3e0f1
MH
3316 dev_set_name(&hdev->dev, "%s", hdev->name);
3317
3318 error = device_add(&hdev->dev);
33ca954d 3319 if (error < 0)
54506918 3320 goto err_wqueue;
1da177e4 3321
611b30f7 3322 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3323 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3324 hdev);
611b30f7
MH
3325 if (hdev->rfkill) {
3326 if (rfkill_register(hdev->rfkill) < 0) {
3327 rfkill_destroy(hdev->rfkill);
3328 hdev->rfkill = NULL;
3329 }
3330 }
3331
5e130367 3332 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3333 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3334
a1536da2
MH
3335 hci_dev_set_flag(hdev, HCI_SETUP);
3336 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3337
01cd3404 3338 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3339 /* Assume BR/EDR support until proven otherwise (such as
3340 * through reading supported features during init.
3341 */
a1536da2 3342 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3343 }
ce2be9ac 3344
fcee3377
GP
3345 write_lock(&hci_dev_list_lock);
3346 list_add(&hdev->list, &hci_dev_list);
3347 write_unlock(&hci_dev_list_lock);
3348
4a964404
MH
3349 /* Devices that are marked for raw-only usage are unconfigured
3350 * and should not be included in normal operation.
fee746b0
MH
3351 */
3352 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3353 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3354
1da177e4 3355 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3356 hci_dev_hold(hdev);
1da177e4 3357
19202573 3358 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3359
1da177e4 3360 return id;
f48fd9c8 3361
33ca954d
DH
3362err_wqueue:
3363 destroy_workqueue(hdev->workqueue);
6ead1bbc 3364 destroy_workqueue(hdev->req_workqueue);
33ca954d 3365err:
3df92b31 3366 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3367
33ca954d 3368 return error;
1da177e4
LT
3369}
3370EXPORT_SYMBOL(hci_register_dev);
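/* Illustrative sketch (added, not part of the original file): the minimal
 * sequence a transport driver follows; my_open, my_close and my_send are
 * hypothetical driver callbacks.
 *
 *	struct hci_dev *hdev;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() fails with -EINVAL unless open, close and send are
 * all provided, as checked above.
 */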
3371
3372/* Unregister HCI device */
59735631 3373void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3374{
2d7cc19e 3375 int id;
ef222013 3376
c13854ce 3377 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3378
a1536da2 3379 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3380
3df92b31
SL
3381 id = hdev->id;
3382
f20d09d5 3383 write_lock(&hci_dev_list_lock);
1da177e4 3384 list_del(&hdev->list);
f20d09d5 3385 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3386
3387 hci_dev_do_close(hdev);
3388
b9b5ef18
GP
3389 cancel_work_sync(&hdev->power_on);
3390
ab81cbf9 3391 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3392 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3393 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3394 hci_dev_lock(hdev);
744cf19e 3395 mgmt_index_removed(hdev);
09fd0de5 3396 hci_dev_unlock(hdev);
56e5cb86 3397 }
ab81cbf9 3398
2e58ef3e
JH
3399 /* mgmt_index_removed should take care of emptying the
3400 * pending list */
3401 BUG_ON(!list_empty(&hdev->mgmt_pending));
3402
1da177e4
LT
3403 hci_notify(hdev, HCI_DEV_UNREG);
3404
611b30f7
MH
3405 if (hdev->rfkill) {
3406 rfkill_unregister(hdev->rfkill);
3407 rfkill_destroy(hdev->rfkill);
3408 }
3409
bdc3e0f1 3410 device_del(&hdev->dev);
147e2d59 3411
0153e2ec
MH
3412 debugfs_remove_recursive(hdev->debugfs);
3413
f48fd9c8 3414 destroy_workqueue(hdev->workqueue);
6ead1bbc 3415 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3416
09fd0de5 3417 hci_dev_lock(hdev);
dcc36c16 3418 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3419 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3420 hci_uuids_clear(hdev);
55ed8ca1 3421 hci_link_keys_clear(hdev);
b899efaf 3422 hci_smp_ltks_clear(hdev);
970c4e46 3423 hci_smp_irks_clear(hdev);
2763eda6 3424 hci_remote_oob_data_clear(hdev);
d2609b34 3425 hci_adv_instances_clear(hdev);
dcc36c16 3426 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 3427 hci_conn_params_clear_all(hdev);
22078800 3428 hci_discovery_filter_clear(hdev);
09fd0de5 3429 hci_dev_unlock(hdev);
e2e0cacb 3430
dc946bd8 3431 hci_dev_put(hdev);
3df92b31
SL
3432
3433 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3434}
3435EXPORT_SYMBOL(hci_unregister_dev);
3436
3437/* Suspend HCI device */
3438int hci_suspend_dev(struct hci_dev *hdev)
3439{
3440 hci_notify(hdev, HCI_DEV_SUSPEND);
3441 return 0;
3442}
3443EXPORT_SYMBOL(hci_suspend_dev);
3444
3445/* Resume HCI device */
3446int hci_resume_dev(struct hci_dev *hdev)
3447{
3448 hci_notify(hdev, HCI_DEV_RESUME);
3449 return 0;
3450}
3451EXPORT_SYMBOL(hci_resume_dev);
3452
75e0569f
MH
3453/* Reset HCI device */
3454int hci_reset_dev(struct hci_dev *hdev)
3455{
3456 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3457 struct sk_buff *skb;
3458
3459 skb = bt_skb_alloc(3, GFP_ATOMIC);
3460 if (!skb)
3461 return -ENOMEM;
3462
3463 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3464 memcpy(skb_put(skb, 3), hw_err, 3);
3465
3466 /* Send Hardware Error to upper stack */
3467 return hci_recv_frame(hdev, skb);
3468}
3469EXPORT_SYMBOL(hci_reset_dev);
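/* Clarifying note (added, not part of the original file): hw_err[] is a
 * complete HCI event packet: event code HCI_EV_HARDWARE_ERROR, parameter
 * length 0x01 and hardware code 0x00. Injecting it into the RX path lets
 * the normal event handling drive the reset instead of a driver-private
 * path.
 */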
3470
76bca880 3471/* Receive frame from HCI drivers */
e1a26170 3472int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3473{
76bca880 3474 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3475 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3476 kfree_skb(skb);
3477 return -ENXIO;
3478 }
3479
d82603c6 3480 /* Incoming skb */
76bca880
MH
3481 bt_cb(skb)->incoming = 1;
3482
3483 /* Time stamp */
3484 __net_timestamp(skb);
3485
76bca880 3486 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3487 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3488
76bca880
MH
3489 return 0;
3490}
3491EXPORT_SYMBOL(hci_recv_frame);
3492
1da177e4
LT
3493/* ---- Interface to upper protocols ---- */
3494
1da177e4
LT
3495int hci_register_cb(struct hci_cb *cb)
3496{
3497 BT_DBG("%p name %s", cb, cb->name);
3498
fba7ecf0 3499 mutex_lock(&hci_cb_list_lock);
00629e0f 3500 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3501 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3502
3503 return 0;
3504}
3505EXPORT_SYMBOL(hci_register_cb);
3506
3507int hci_unregister_cb(struct hci_cb *cb)
3508{
3509 BT_DBG("%p name %s", cb, cb->name);
3510
fba7ecf0 3511 mutex_lock(&hci_cb_list_lock);
1da177e4 3512 list_del(&cb->list);
fba7ecf0 3513 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3514
3515 return 0;
3516}
3517EXPORT_SYMBOL(hci_unregister_cb);
3518
51086991 3519static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3520{
cdc52faa
MH
3521 int err;
3522
0d48d939 3523 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3524
cd82e61c
MH
3525 /* Time stamp */
3526 __net_timestamp(skb);
1da177e4 3527
cd82e61c
MH
3528 /* Send copy to monitor */
3529 hci_send_to_monitor(hdev, skb);
3530
3531 if (atomic_read(&hdev->promisc)) {
3532 /* Send copy to the sockets */
470fe1b5 3533 hci_send_to_sock(hdev, skb);
1da177e4
LT
3534 }
3535
3536 /* Get rid of skb owner, prior to sending to the driver. */
3537 skb_orphan(skb);
3538
cdc52faa
MH
3539 err = hdev->send(hdev, skb);
3540 if (err < 0) {
3541 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3542 kfree_skb(skb);
3543 }
1da177e4
LT
3544}
3545
1ca3a9d0 3546/* Send HCI command */
07dc93dd
JH
3547int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3548 const void *param)
1ca3a9d0
JH
3549{
3550 struct sk_buff *skb;
3551
3552 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3553
3554 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3555 if (!skb) {
3556 BT_ERR("%s no memory for command", hdev->name);
3557 return -ENOMEM;
3558 }
3559
49c922bb 3560 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3561 * single-command requests.
3562 */
db6e3e8d 3563 bt_cb(skb)->req.start = true;
11714b3d 3564
1da177e4 3565 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3566 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3567
3568 return 0;
3569}
1da177e4
LT
3570
3571/* Get data from the previously sent command */
a9de9248 3572void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3573{
3574 struct hci_command_hdr *hdr;
3575
3576 if (!hdev->sent_cmd)
3577 return NULL;
3578
3579 hdr = (void *) hdev->sent_cmd->data;
3580
a9de9248 3581 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3582 return NULL;
3583
f0e09510 3584 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3585
3586 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3587}
3588
fbef168f
LP
3589/* Send HCI command and wait for the Command Complete event */
3590struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3591 const void *param, u32 timeout)
3592{
3593 struct sk_buff *skb;
3594
3595 if (!test_bit(HCI_UP, &hdev->flags))
3596 return ERR_PTR(-ENETDOWN);
3597
3598 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3599
3600 hci_req_lock(hdev);
3601 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3602 hci_req_unlock(hdev);
3603
3604 return skb;
3605}
3606EXPORT_SYMBOL(hci_cmd_sync);
3607
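/* Example (illustrative sketch): a driver waiting synchronously for a
 * vendor command. The 0xfc0f opcode is hypothetical; the returned skb
 * carries the command complete parameters and must be freed.
 */
static int example_vendor_cmd(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, 0xfc0f, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	BT_DBG("%s response length %d", hdev->name, skb->len);

	kfree_skb(skb);
	return 0;
}
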
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

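/* Worked example: hci_handle_pack() keeps the 12-bit connection handle
 * in the low bits and the packet boundary/broadcast flags above them.
 * For handle 0x002a with ACL_START_NO_FLUSH (0x00) the wire value is
 * cpu_to_le16(0x002a); with ACL_CONT (0x01) it becomes
 * cpu_to_le16(0x102a).
 */
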
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

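/* Example (illustrative, modelled on l2cap_do_send() in l2cap_core.c):
 * an upper layer hands a fully built frame to the per-connection HCI
 * channel. ACL_START_NO_FLUSH and ACL_CONT are the real boundary flags;
 * hci_queue_acl() above adds one ACL header per frag_list fragment.
 */
static void example_send_frame(struct hci_chan *hchan, struct sk_buff *skb,
			       bool no_flush)
{
	u16 flags = no_flush ? ACL_START_NO_FLUSH : ACL_START;

	hci_send_acl(hchan, skb, flags);
}
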
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

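/* Worked example of the quota above: with three SCO connections holding
 * queued data (num == 3) and hdev->sco_cnt == 8 free packet slots, the
 * least-used connection is picked and granted q = 8 / 3 = 2 packets for
 * this round; when cnt / num rounds down to zero the connection still
 * gets a single packet, so it cannot starve.
 */
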
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

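/* Worked example: with hdev->block_len == 339, an outgoing ACL frame of
 * 682 bytes (4-byte ACL header plus 678 bytes of payload) costs
 * DIV_ROUND_UP(678, 339) == 2 controller buffer blocks.
 */
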
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

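/* Illustrative call site (mirroring hci_event_packet() in hci_event.c):
 * the event handler resolves the per-request callbacks and invokes
 * whichever one was set.
 */
static void example_handle_cmd_complete(struct hci_dev *hdev, u16 opcode,
					u8 status, struct sk_buff *skb)
{
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;

	hci_req_cmd_complete(hdev, opcode, status, &req_complete,
			     &req_complete_skb);

	if (req_complete)
		req_complete(hdev, status, opcode);
	else if (req_complete_skb)
		req_complete_skb(hdev, status, opcode, skb);
}
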
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}