/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
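
/* Send a single HCI command and sleep until hci_req_sync_complete()
 * reports its completion or the timeout fires. On success the
 * completion event skb is returned; on failure an ERR_PTR is returned,
 * with -ENODATA indicating that no event payload was received.
 */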
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. The caller must hold
 * hdev->req_lock; hci_req_sync() below takes it on behalf of its
 * callers.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
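
/* Build the HCI Set Event Mask command. Each bit in the 8-byte mask
 * unmasks one HCI event; only events the controller can actually
 * generate, based on its LMP features, are enabled.
 */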
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
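
/* Controller bring-up runs in up to four stages: init1 resets the
 * controller and reads basic information, init2 performs the
 * transport-specific setup, and init3/init4 send the remaining
 * commands for BR/EDR capable controllers. AMP controllers stop
 * after the first two stages.
 */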
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		/* The Device Under Test (DUT) mode is special and available
		 * for all controller types. So just create it early on.
		 */
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);

		/* When the driver supports the set_diag callback, then
		 * expose an entry to modify the vendor diagnostic setting.
		 */
		if (hdev->set_diag)
			debugfs_create_file("vendor_diag", 0644, hdev->debugfs,
					    hdev, &vendor_diag_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}
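
/* Request builders for the legacy device ioctls handled by
 * hci_dev_cmd() below; each queues a single HCI command carrying the
 * user-supplied option value.
 */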
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
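
/* Add or refresh an inquiry cache entry and return MGMT_DEV_FOUND_*
 * flags for the resulting device-found event: LEGACY_PAIRING when the
 * remote device does not support SSP, and CONFIRM_NAME when its name
 * is still unknown.
 */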
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
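
/* Handle the HCIINQUIRY ioctl: flush the cache if it is stale (or if
 * IREQ_CACHE_FLUSH was requested), run a new inquiry when needed and
 * copy the cached results back to userspace.
 */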
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
1390
cbed0ca1 1391static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 1392{
1da177e4
LT
1393 int ret = 0;
1394
1da177e4
LT
1395 BT_DBG("%s %p", hdev->name, hdev);
1396
1397 hci_req_lock(hdev);
1398
d7a5a11d 1399 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
94324962
JH
1400 ret = -ENODEV;
1401 goto done;
1402 }
1403
d7a5a11d
MH
1404 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1405 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
a5c8f270
MH
1406 /* Check for rfkill but allow the HCI setup stage to
1407 * proceed (which in itself doesn't cause any RF activity).
1408 */
d7a5a11d 1409 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
a5c8f270
MH
1410 ret = -ERFKILL;
1411 goto done;
1412 }
1413
1414 /* Check for valid public address or a configured static
1415 * random adddress, but let the HCI setup proceed to
1416 * be able to determine if there is a public address
1417 * or not.
1418 *
c6beca0e
MH
1419 * In case of user channel usage, it is not important
1420 * if a public address or static random address is
1421 * available.
1422 *
a5c8f270
MH
1423 * This check is only valid for BR/EDR controllers
1424 * since AMP controllers do not have an address.
1425 */
d7a5a11d 1426 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
c6beca0e 1427 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
1428 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1429 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1430 ret = -EADDRNOTAVAIL;
1431 goto done;
1432 }
611b30f7
MH
1433 }
1434
1da177e4
LT
1435 if (test_bit(HCI_UP, &hdev->flags)) {
1436 ret = -EALREADY;
1437 goto done;
1438 }
1439
1da177e4
LT
1440 if (hdev->open(hdev)) {
1441 ret = -EIO;
1442 goto done;
1443 }
1444
e9ca8bf1 1445 set_bit(HCI_RUNNING, &hdev->flags);
4a3f95b7
MH
1446 hci_notify(hdev, HCI_DEV_OPEN);
1447
f41c70c4
MH
1448 atomic_set(&hdev->cmd_cnt, 1);
1449 set_bit(HCI_INIT, &hdev->flags);
1450
d7a5a11d 1451 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
af202f84
MH
1452 if (hdev->setup)
1453 ret = hdev->setup(hdev);
f41c70c4 1454
af202f84
MH
1455 /* The transport driver can set these quirks before
1456 * creating the HCI device or in its setup callback.
1457 *
1458 * In case any of them is set, the controller has to
1459 * start up as unconfigured.
1460 */
eb1904f4
MH
1461 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1462 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
a1536da2 1463 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
f41c70c4 1464
0ebca7d6
MH
1465 /* For an unconfigured controller it is required to
1466 * read at least the version information provided by
1467 * the Read Local Version Information command.
1468 *
1469 * If the set_bdaddr driver callback is provided, then
1470 * also the original Bluetooth public device address
1471 * will be read using the Read BD Address command.
1472 */
d7a5a11d 1473 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
0ebca7d6 1474 ret = __hci_unconf_init(hdev);
89bc22d2
MH
1475 }
1476
d7a5a11d 1477 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
9713c17b
MH
1478 /* If public address change is configured, ensure that
1479 * the address gets programmed. If the driver does not
1480 * support changing the public address, fail the power
1481 * on procedure.
1482 */
1483 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1484 hdev->set_bdaddr)
24c457e2
MH
1485 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1486 else
1487 ret = -EADDRNOTAVAIL;
1488 }
1489
f41c70c4 1490 if (!ret) {
d7a5a11d
MH
1491 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1492 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
f41c70c4 1493 ret = __hci_init(hdev);
1da177e4
LT
1494 }
1495
f41c70c4
MH
1496 clear_bit(HCI_INIT, &hdev->flags);
1497
1da177e4
LT
1498 if (!ret) {
1499 hci_dev_hold(hdev);
a1536da2 1500 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1da177e4
LT
1501 set_bit(HCI_UP, &hdev->flags);
1502 hci_notify(hdev, HCI_DEV_UP);
d7a5a11d
MH
1503 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1504 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1505 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1506 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1514b892 1507 hdev->dev_type == HCI_BREDR) {
09fd0de5 1508 hci_dev_lock(hdev);
744cf19e 1509 mgmt_powered(hdev, 1);
09fd0de5 1510 hci_dev_unlock(hdev);
56e5cb86 1511 }
8e87d142 1512 } else {
1da177e4 1513 /* Init failed, cleanup */
3eff45ea 1514 flush_work(&hdev->tx_work);
c347b765 1515 flush_work(&hdev->cmd_work);
b78752cc 1516 flush_work(&hdev->rx_work);
1da177e4
LT
1517
1518 skb_queue_purge(&hdev->cmd_q);
1519 skb_queue_purge(&hdev->rx_q);
1520
1521 if (hdev->flush)
1522 hdev->flush(hdev);
1523
1524 if (hdev->sent_cmd) {
1525 kfree_skb(hdev->sent_cmd);
1526 hdev->sent_cmd = NULL;
1527 }
1528
e9ca8bf1 1529 clear_bit(HCI_RUNNING, &hdev->flags);
4a3f95b7
MH
1530 hci_notify(hdev, HCI_DEV_CLOSE);
1531
1da177e4 1532 hdev->close(hdev);
fee746b0 1533 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
1534 }
1535
1536done:
1537 hci_req_unlock(hdev);
1da177e4
LT
1538 return ret;
1539}
1540

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
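
/* Power off a controller: run the vendor shutdown routine if one is
 * provided, flush pending work and timers, notify mgmt, optionally
 * reset the controller and finally call the driver close() callback.
 */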
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}
5c912495
MH
1798int hci_dev_reset(__u16 dev)
1799{
1800 struct hci_dev *hdev;
1801 int err;
1802
1803 hdev = hci_dev_get(dev);
1804 if (!hdev)
1805 return -ENODEV;
1806
1807 if (!test_bit(HCI_UP, &hdev->flags)) {
1808 err = -ENETDOWN;
1809 goto done;
1810 }
1811
d7a5a11d 1812 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1813 err = -EBUSY;
1814 goto done;
1815 }
1816
d7a5a11d 1817 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1818 err = -EOPNOTSUPP;
1819 goto done;
1820 }
1821
1822 err = hci_dev_do_reset(hdev);
1823
1824done:
1825 hci_dev_put(hdev);
1826 return err;
1827}
1828
1da177e4
LT
1829int hci_dev_reset_stat(__u16 dev)
1830{
1831 struct hci_dev *hdev;
1832 int ret = 0;
1833
70f23020
AE
1834 hdev = hci_dev_get(dev);
1835 if (!hdev)
1da177e4
LT
1836 return -ENODEV;
1837
d7a5a11d 1838 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1839 ret = -EBUSY;
1840 goto done;
1841 }
1842
d7a5a11d 1843 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1844 ret = -EOPNOTSUPP;
1845 goto done;
1846 }
1847
1da177e4
LT
1848 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1849
0736cfa8 1850done:
1da177e4 1851 hci_dev_put(hdev);
1da177e4
LT
1852 return ret;
1853}
1854
123abc08
JH
1855static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1856{
bc6d2d04 1857 bool conn_changed, discov_changed;
123abc08
JH
1858
1859 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1860
1861 if ((scan & SCAN_PAGE))
238be788
MH
1862 conn_changed = !hci_dev_test_and_set_flag(hdev,
1863 HCI_CONNECTABLE);
123abc08 1864 else
a69d8927
MH
1865 conn_changed = hci_dev_test_and_clear_flag(hdev,
1866 HCI_CONNECTABLE);
123abc08 1867
bc6d2d04 1868 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1869 discov_changed = !hci_dev_test_and_set_flag(hdev,
1870 HCI_DISCOVERABLE);
bc6d2d04 1871 } else {
a358dc11 1872 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1873 discov_changed = hci_dev_test_and_clear_flag(hdev,
1874 HCI_DISCOVERABLE);
bc6d2d04
JH
1875 }
1876
d7a5a11d 1877 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1878 return;
1879
bc6d2d04
JH
1880 if (conn_changed || discov_changed) {
1881 /* In case this was disabled through mgmt */
a1536da2 1882 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1883
d7a5a11d 1884 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
bc6d2d04
JH
1885 mgmt_update_adv_data(hdev);
1886
123abc08 1887 mgmt_new_settings(hdev);
bc6d2d04 1888 }
123abc08
JH
1889}
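/* Annotation (editorial, not part of the original file): dev_opt for
 * HCISETSCAN carries the HCI Write_Scan_Enable value, so assuming the
 * spec-defined constants (SCAN_INQUIRY 0x01, SCAN_PAGE 0x02) the
 * mapping above works out to:
 *
 *   0x00  no scans        -> HCI_CONNECTABLE and HCI_DISCOVERABLE cleared
 *   0x02  page scan only  -> HCI_CONNECTABLE set, HCI_DISCOVERABLE cleared
 *   0x03  page + inquiry  -> HCI_CONNECTABLE and HCI_DISCOVERABLE set
 */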
1890
1da177e4
LT
1891int hci_dev_cmd(unsigned int cmd, void __user *arg)
1892{
1893 struct hci_dev *hdev;
1894 struct hci_dev_req dr;
1895 int err = 0;
1896
1897 if (copy_from_user(&dr, arg, sizeof(dr)))
1898 return -EFAULT;
1899
70f23020
AE
1900 hdev = hci_dev_get(dr.dev_id);
1901 if (!hdev)
1da177e4
LT
1902 return -ENODEV;
1903
d7a5a11d 1904 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1905 err = -EBUSY;
1906 goto done;
1907 }
1908
d7a5a11d 1909 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1910 err = -EOPNOTSUPP;
1911 goto done;
1912 }
1913
5b69bef5
MH
1914 if (hdev->dev_type != HCI_BREDR) {
1915 err = -EOPNOTSUPP;
1916 goto done;
1917 }
1918
d7a5a11d 1919 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1920 err = -EOPNOTSUPP;
1921 goto done;
1922 }
1923
1da177e4
LT
1924 switch (cmd) {
1925 case HCISETAUTH:
01178cd4
JH
1926 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1927 HCI_INIT_TIMEOUT);
1da177e4
LT
1928 break;
1929
1930 case HCISETENCRYPT:
1931 if (!lmp_encrypt_capable(hdev)) {
1932 err = -EOPNOTSUPP;
1933 break;
1934 }
1935
1936 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1937 /* Auth must be enabled first */
01178cd4
JH
1938 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1939 HCI_INIT_TIMEOUT);
1da177e4
LT
1940 if (err)
1941 break;
1942 }
1943
01178cd4
JH
1944 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1945 HCI_INIT_TIMEOUT);
1da177e4
LT
1946 break;
1947
1948 case HCISETSCAN:
01178cd4
JH
1949 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1950 HCI_INIT_TIMEOUT);
91a668b0 1951
bc6d2d04
JH
1952 /* Ensure that the connectable and discoverable states
1953 * get correctly modified as this was a non-mgmt change.
91a668b0 1954 */
123abc08
JH
1955 if (!err)
1956 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1957 break;
1958
1da177e4 1959 case HCISETLINKPOL:
01178cd4
JH
1960 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1961 HCI_INIT_TIMEOUT);
1da177e4
LT
1962 break;
1963
1964 case HCISETLINKMODE:
e4e8e37c
MH
1965 hdev->link_mode = ((__u16) dr.dev_opt) &
1966 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1967 break;
1968
1969 case HCISETPTYPE:
1970 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1971 break;
1972
1973 case HCISETACLMTU:
e4e8e37c
MH
1974 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1975 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1976 break;
1977
1978 case HCISETSCOMTU:
e4e8e37c
MH
1979 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1980 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1981 break;
1982
1983 default:
1984 err = -EINVAL;
1985 break;
1986 }
e4e8e37c 1987
0736cfa8 1988done:
1da177e4
LT
1989 hci_dev_put(hdev);
1990 return err;
1991}
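/* A minimal userspace sketch (editorial, not from this file) of how the
 * ioctls handled above are reached. It assumes the usual BlueZ uapi
 * headers; error handling is reduced to the bare minimum, and the
 * HCISETSCAN value follows the mapping noted after
 * hci_update_scan_state() above.
 *
 * #include <unistd.h>
 * #include <sys/ioctl.h>
 * #include <sys/socket.h>
 * #include <bluetooth/bluetooth.h>
 * #include <bluetooth/hci.h>
 *
 * int example_set_scan(int dev_id)
 * {
 *	struct hci_dev_req dr;
 *	int ctl, err = 0;
 *
 *	ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ctl < 0)
 *		return -1;
 *
 *	dr.dev_id = dev_id;
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	// connectable + discoverable
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		err = -1;
 *
 *	close(ctl);
 *	return err;
 * }
 */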
1992
1993int hci_get_dev_list(void __user *arg)
1994{
8035ded4 1995 struct hci_dev *hdev;
1da177e4
LT
1996 struct hci_dev_list_req *dl;
1997 struct hci_dev_req *dr;
1da177e4
LT
1998 int n = 0, size, err;
1999 __u16 dev_num;
2000
2001 if (get_user(dev_num, (__u16 __user *) arg))
2002 return -EFAULT;
2003
2004 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2005 return -EINVAL;
2006
2007 size = sizeof(*dl) + dev_num * sizeof(*dr);
2008
70f23020
AE
2009 dl = kzalloc(size, GFP_KERNEL);
2010 if (!dl)
1da177e4
LT
2011 return -ENOMEM;
2012
2013 dr = dl->dev_req;
2014
f20d09d5 2015 read_lock(&hci_dev_list_lock);
8035ded4 2016 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2017 unsigned long flags = hdev->flags;
c542a06c 2018
2e84d8db
MH
2019 /* When auto-off is configured, the transport is running,
2020 * but in that case still indicate that the device is
2021 * actually down.
2022 */
d7a5a11d 2023 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 2024 flags &= ~BIT(HCI_UP);
c542a06c 2025
1da177e4 2026 (dr + n)->dev_id = hdev->id;
2e84d8db 2027 (dr + n)->dev_opt = flags;
c542a06c 2028
1da177e4
LT
2029 if (++n >= dev_num)
2030 break;
2031 }
f20d09d5 2032 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2033
2034 dl->dev_num = n;
2035 size = sizeof(*dl) + n * sizeof(*dr);
2036
2037 err = copy_to_user(arg, dl, size);
2038 kfree(dl);
2039
2040 return err ? -EFAULT : 0;
2041}
2042
2043int hci_get_dev_info(void __user *arg)
2044{
2045 struct hci_dev *hdev;
2046 struct hci_dev_info di;
2e84d8db 2047 unsigned long flags;
1da177e4
LT
2048 int err = 0;
2049
2050 if (copy_from_user(&di, arg, sizeof(di)))
2051 return -EFAULT;
2052
70f23020
AE
2053 hdev = hci_dev_get(di.dev_id);
2054 if (!hdev)
1da177e4
LT
2055 return -ENODEV;
2056
2e84d8db
MH
2057 /* When auto-off is configured, the transport is running,
2058 * but in that case still indicate that the device is
2059 * actually down.
2060 */
d7a5a11d 2061 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
2062 flags = hdev->flags & ~BIT(HCI_UP);
2063 else
2064 flags = hdev->flags;
c542a06c 2065
1da177e4
LT
2066 strcpy(di.name, hdev->name);
2067 di.bdaddr = hdev->bdaddr;
60f2a3ed 2068 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2069 di.flags = flags;
1da177e4 2070 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2071 if (lmp_bredr_capable(hdev)) {
2072 di.acl_mtu = hdev->acl_mtu;
2073 di.acl_pkts = hdev->acl_pkts;
2074 di.sco_mtu = hdev->sco_mtu;
2075 di.sco_pkts = hdev->sco_pkts;
2076 } else {
2077 di.acl_mtu = hdev->le_mtu;
2078 di.acl_pkts = hdev->le_pkts;
2079 di.sco_mtu = 0;
2080 di.sco_pkts = 0;
2081 }
1da177e4
LT
2082 di.link_policy = hdev->link_policy;
2083 di.link_mode = hdev->link_mode;
2084
2085 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2086 memcpy(&di.features, &hdev->features, sizeof(di.features));
2087
2088 if (copy_to_user(arg, &di, sizeof(di)))
2089 err = -EFAULT;
2090
2091 hci_dev_put(hdev);
2092
2093 return err;
2094}
2095
2096/* ---- Interface to HCI drivers ---- */
2097
611b30f7
MH
2098static int hci_rfkill_set_block(void *data, bool blocked)
2099{
2100 struct hci_dev *hdev = data;
2101
2102 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2103
d7a5a11d 2104 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2105 return -EBUSY;
2106
5e130367 2107 if (blocked) {
a1536da2 2108 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2109 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2110 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2111 hci_dev_do_close(hdev);
5e130367 2112 } else {
a358dc11 2113 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2114 }
611b30f7
MH
2115
2116 return 0;
2117}
2118
2119static const struct rfkill_ops hci_rfkill_ops = {
2120 .set_block = hci_rfkill_set_block,
2121};
2122
ab81cbf9
JH
2123static void hci_power_on(struct work_struct *work)
2124{
2125 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2126 int err;
ab81cbf9
JH
2127
2128 BT_DBG("%s", hdev->name);
2129
cbed0ca1 2130 err = hci_dev_do_open(hdev);
96570ffc 2131 if (err < 0) {
3ad67582 2132 hci_dev_lock(hdev);
96570ffc 2133 mgmt_set_powered_failed(hdev, err);
3ad67582 2134 hci_dev_unlock(hdev);
ab81cbf9 2135 return;
96570ffc 2136 }
ab81cbf9 2137
a5c8f270
MH
2138 /* During the HCI setup phase, a few error conditions are
2139 * ignored and they need to be checked now. If they are still
2140 * valid, it is important to turn the device back off.
2141 */
d7a5a11d
MH
2142 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2143 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
a5c8f270
MH
2144 (hdev->dev_type == HCI_BREDR &&
2145 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2146 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2147 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2148 hci_dev_do_close(hdev);
d7a5a11d 2149 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2150 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2151 HCI_AUTO_OFF_TIMEOUT);
bf543036 2152 }
ab81cbf9 2153
a69d8927 2154 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2155 /* For unconfigured devices, set the HCI_RAW flag
2156 * so that userspace can easily identify them.
4a964404 2157 */
d7a5a11d 2158 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2159 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2160
2161 /* For fully configured devices, this will send
2162 * the Index Added event. For unconfigured devices,
2163 * it will send an Unconfigured Index Added event.
2164 *
2165 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2166 * and no event will be sent.
2167 */
2168 mgmt_index_added(hdev);
a69d8927 2169 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
2170 /* Once the controller is configured, it is
2171 * important to clear the HCI_RAW flag.
2172 */
d7a5a11d 2173 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2174 clear_bit(HCI_RAW, &hdev->flags);
2175
d603b76b
MH
2176 /* Powering on the controller with HCI_CONFIG set only
2177 * happens with the transition from unconfigured to
2178 * configured. This will send the Index Added event.
2179 */
744cf19e 2180 mgmt_index_added(hdev);
fee746b0 2181 }
ab81cbf9
JH
2182}
2183
2184static void hci_power_off(struct work_struct *work)
2185{
3243553f 2186 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2187 power_off.work);
ab81cbf9
JH
2188
2189 BT_DBG("%s", hdev->name);
2190
8ee56540 2191 hci_dev_do_close(hdev);
ab81cbf9
JH
2192}
2193
c7741d16
MH
2194static void hci_error_reset(struct work_struct *work)
2195{
2196 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2197
2198 BT_DBG("%s", hdev->name);
2199
2200 if (hdev->hw_error)
2201 hdev->hw_error(hdev, hdev->hw_error_code);
2202 else
2203 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2204 hdev->hw_error_code);
2205
2206 if (hci_dev_do_close(hdev))
2207 return;
2208
c7741d16
MH
2209 hci_dev_do_open(hdev);
2210}
2211
16ab91ab
JH
2212static void hci_discov_off(struct work_struct *work)
2213{
2214 struct hci_dev *hdev;
16ab91ab
JH
2215
2216 hdev = container_of(work, struct hci_dev, discov_off.work);
2217
2218 BT_DBG("%s", hdev->name);
2219
d1967ff8 2220 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2221}
2222
5d900e46
FG
2223static void hci_adv_timeout_expire(struct work_struct *work)
2224{
2225 struct hci_dev *hdev;
2226
2227 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2228
2229 BT_DBG("%s", hdev->name);
2230
2231 mgmt_adv_timeout_expired(hdev);
2232}
2233
35f7498a 2234void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2235{
4821002c 2236 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2237
4821002c
JH
2238 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2239 list_del(&uuid->list);
2aeb9a1a
JH
2240 kfree(uuid);
2241 }
2aeb9a1a
JH
2242}
2243
35f7498a 2244void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2245{
0378b597 2246 struct link_key *key;
55ed8ca1 2247
0378b597
JH
2248 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2249 list_del_rcu(&key->list);
2250 kfree_rcu(key, rcu);
55ed8ca1 2251 }
55ed8ca1
JH
2252}
2253
35f7498a 2254void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2255{
970d0f1b 2256 struct smp_ltk *k;
b899efaf 2257
970d0f1b
JH
2258 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2259 list_del_rcu(&k->list);
2260 kfree_rcu(k, rcu);
b899efaf 2261 }
b899efaf
VCG
2262}
2263
970c4e46
JH
2264void hci_smp_irks_clear(struct hci_dev *hdev)
2265{
adae20cb 2266 struct smp_irk *k;
970c4e46 2267
adae20cb
JH
2268 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2269 list_del_rcu(&k->list);
2270 kfree_rcu(k, rcu);
970c4e46
JH
2271 }
2272}
2273
55ed8ca1
JH
2274struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2275{
8035ded4 2276 struct link_key *k;
55ed8ca1 2277
0378b597
JH
2278 rcu_read_lock();
2279 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2280 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2281 rcu_read_unlock();
55ed8ca1 2282 return k;
0378b597
JH
2283 }
2284 }
2285 rcu_read_unlock();
55ed8ca1
JH
2286
2287 return NULL;
2288}
2289
745c0ce3 2290static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2291 u8 key_type, u8 old_key_type)
d25e28ab
JH
2292{
2293 /* Legacy key */
2294 if (key_type < 0x03)
745c0ce3 2295 return true;
d25e28ab
JH
2296
2297 /* Debug keys are insecure so don't store them persistently */
2298 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2299 return false;
d25e28ab
JH
2300
2301 /* Changed combination key and there's no previous one */
2302 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2303 return false;
d25e28ab
JH
2304
2305 /* Security mode 3 case */
2306 if (!conn)
745c0ce3 2307 return true;
d25e28ab 2308
e3befab9
JH
2309 /* BR/EDR key derived using SC from an LE link */
2310 if (conn->type == LE_LINK)
2311 return true;
2312
d25e28ab
JH
2313 /* Neither local nor remote side had no-bonding as requirement */
2314 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2315 return true;
d25e28ab
JH
2316
2317 /* Local side had dedicated bonding as requirement */
2318 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2319 return true;
d25e28ab
JH
2320
2321 /* Remote side had dedicated bonding as requirement */
2322 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2323 return true;
d25e28ab
JH
2324
2325 /* If none of the above criteria match, then don't store the key
2326 * persistently */
745c0ce3 2327 return false;
d25e28ab
JH
2328}
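/* Worked example (editorial annotation): a plain combination key
 * (type 0x00, below the 0x03 legacy cutoff) is always stored; a debug
 * combination key is never stored; and a key created while either
 * side required dedicated bonding (auth_type 0x02/0x03) is stored
 * persistently under the dedicated-bonding rules above.
 */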
2329
e804d25d 2330static u8 ltk_role(u8 type)
98a0b845 2331{
e804d25d
JH
2332 if (type == SMP_LTK)
2333 return HCI_ROLE_MASTER;
98a0b845 2334
e804d25d 2335 return HCI_ROLE_SLAVE;
98a0b845
JH
2336}
2337
f3a73d97
JH
2338struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2339 u8 addr_type, u8 role)
75d262c2 2340{
c9839a11 2341 struct smp_ltk *k;
75d262c2 2342
970d0f1b
JH
2343 rcu_read_lock();
2344 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2345 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2346 continue;
2347
923e2414 2348 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2349 rcu_read_unlock();
75d262c2 2350 return k;
970d0f1b
JH
2351 }
2352 }
2353 rcu_read_unlock();
75d262c2
VCG
2354
2355 return NULL;
2356}
75d262c2 2357
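/* Annotation (editorial, not original source): hci_find_irk_by_rpa()
 * below resolves an RPA in two passes: first a cheap comparison
 * against the last RPA cached in each IRK entry, then the real
 * cryptographic check via smp_irk_matches(), caching the RPA in the
 * matching entry on a hit.
 */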
970c4e46
JH
2358struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2359{
2360 struct smp_irk *irk;
2361
adae20cb
JH
2362 rcu_read_lock();
2363 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2364 if (!bacmp(&irk->rpa, rpa)) {
2365 rcu_read_unlock();
970c4e46 2366 return irk;
adae20cb 2367 }
970c4e46
JH
2368 }
2369
adae20cb 2370 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2371 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2372 bacpy(&irk->rpa, rpa);
adae20cb 2373 rcu_read_unlock();
970c4e46
JH
2374 return irk;
2375 }
2376 }
adae20cb 2377 rcu_read_unlock();
970c4e46
JH
2378
2379 return NULL;
2380}
2381
2382struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2383 u8 addr_type)
2384{
2385 struct smp_irk *irk;
2386
6cfc9988
JH
2387 /* Identity Address must be public or static random */
2388 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2389 return NULL;
2390
adae20cb
JH
2391 rcu_read_lock();
2392 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2393 if (addr_type == irk->addr_type &&
adae20cb
JH
2394 bacmp(bdaddr, &irk->bdaddr) == 0) {
2395 rcu_read_unlock();
970c4e46 2396 return irk;
adae20cb 2397 }
970c4e46 2398 }
adae20cb 2399 rcu_read_unlock();
970c4e46
JH
2400
2401 return NULL;
2402}
2403
567fa2aa 2404struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2405 bdaddr_t *bdaddr, u8 *val, u8 type,
2406 u8 pin_len, bool *persistent)
55ed8ca1
JH
2407{
2408 struct link_key *key, *old_key;
745c0ce3 2409 u8 old_key_type;
55ed8ca1
JH
2410
2411 old_key = hci_find_link_key(hdev, bdaddr);
2412 if (old_key) {
2413 old_key_type = old_key->type;
2414 key = old_key;
2415 } else {
12adcf3a 2416 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2417 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2418 if (!key)
567fa2aa 2419 return NULL;
0378b597 2420 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2421 }
2422
6ed93dc6 2423 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2424
d25e28ab
JH
2425 /* Some buggy controller combinations generate a changed
2426 * combination key for legacy pairing even when there's no
2427 * previous key */
2428 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2429 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2430 type = HCI_LK_COMBINATION;
655fe6ec
JH
2431 if (conn)
2432 conn->key_type = type;
2433 }
d25e28ab 2434
55ed8ca1 2435 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2436 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2437 key->pin_len = pin_len;
2438
b6020ba0 2439 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2440 key->type = old_key_type;
4748fed2
JH
2441 else
2442 key->type = type;
2443
7652ff6a
JH
2444 if (persistent)
2445 *persistent = hci_persistent_key(hdev, conn, type,
2446 old_key_type);
4df378a1 2447
567fa2aa 2448 return key;
55ed8ca1
JH
2449}
2450
ca9142b8 2451struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2452 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2453 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2454{
c9839a11 2455 struct smp_ltk *key, *old_key;
e804d25d 2456 u8 role = ltk_role(type);
75d262c2 2457
f3a73d97 2458 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2459 if (old_key)
75d262c2 2460 key = old_key;
c9839a11 2461 else {
0a14ab41 2462 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2463 if (!key)
ca9142b8 2464 return NULL;
970d0f1b 2465 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2466 }
2467
75d262c2 2468 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2469 key->bdaddr_type = addr_type;
2470 memcpy(key->val, tk, sizeof(key->val));
2471 key->authenticated = authenticated;
2472 key->ediv = ediv;
fe39c7b2 2473 key->rand = rand;
c9839a11
VCG
2474 key->enc_size = enc_size;
2475 key->type = type;
75d262c2 2476
ca9142b8 2477 return key;
75d262c2
VCG
2478}
2479
ca9142b8
JH
2480struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2481 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2482{
2483 struct smp_irk *irk;
2484
2485 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2486 if (!irk) {
2487 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2488 if (!irk)
ca9142b8 2489 return NULL;
970c4e46
JH
2490
2491 bacpy(&irk->bdaddr, bdaddr);
2492 irk->addr_type = addr_type;
2493
adae20cb 2494 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2495 }
2496
2497 memcpy(irk->val, val, 16);
2498 bacpy(&irk->rpa, rpa);
2499
ca9142b8 2500 return irk;
970c4e46
JH
2501}
2502
55ed8ca1
JH
2503int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2504{
2505 struct link_key *key;
2506
2507 key = hci_find_link_key(hdev, bdaddr);
2508 if (!key)
2509 return -ENOENT;
2510
6ed93dc6 2511 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2512
0378b597
JH
2513 list_del_rcu(&key->list);
2514 kfree_rcu(key, rcu);
55ed8ca1
JH
2515
2516 return 0;
2517}
2518
e0b2b27e 2519int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2520{
970d0f1b 2521 struct smp_ltk *k;
c51ffa0b 2522 int removed = 0;
b899efaf 2523
970d0f1b 2524 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2525 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2526 continue;
2527
6ed93dc6 2528 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2529
970d0f1b
JH
2530 list_del_rcu(&k->list);
2531 kfree_rcu(k, rcu);
c51ffa0b 2532 removed++;
b899efaf
VCG
2533 }
2534
c51ffa0b 2535 return removed ? 0 : -ENOENT;
b899efaf
VCG
2536}
2537
a7ec7338
JH
2538void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2539{
adae20cb 2540 struct smp_irk *k;
a7ec7338 2541
adae20cb 2542 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2543 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2544 continue;
2545
2546 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2547
adae20cb
JH
2548 list_del_rcu(&k->list);
2549 kfree_rcu(k, rcu);
a7ec7338
JH
2550 }
2551}
2552
55e76b38
JH
2553bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2554{
2555 struct smp_ltk *k;
4ba9faf3 2556 struct smp_irk *irk;
55e76b38
JH
2557 u8 addr_type;
2558
2559 if (type == BDADDR_BREDR) {
2560 if (hci_find_link_key(hdev, bdaddr))
2561 return true;
2562 return false;
2563 }
2564
2565 /* Convert to HCI addr type which struct smp_ltk uses */
2566 if (type == BDADDR_LE_PUBLIC)
2567 addr_type = ADDR_LE_DEV_PUBLIC;
2568 else
2569 addr_type = ADDR_LE_DEV_RANDOM;
2570
4ba9faf3
JH
2571 irk = hci_get_irk(hdev, bdaddr, addr_type);
2572 if (irk) {
2573 bdaddr = &irk->bdaddr;
2574 addr_type = irk->addr_type;
2575 }
2576
55e76b38
JH
2577 rcu_read_lock();
2578 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2579 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2580 rcu_read_unlock();
55e76b38 2581 return true;
87c8b28d 2582 }
55e76b38
JH
2583 }
2584 rcu_read_unlock();
2585
2586 return false;
2587}
2588
6bd32326 2589/* HCI command timer function */
65cc2b49 2590static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2591{
65cc2b49
MH
2592 struct hci_dev *hdev = container_of(work, struct hci_dev,
2593 cmd_timer.work);
6bd32326 2594
bda4f23a
AE
2595 if (hdev->sent_cmd) {
2596 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2597 u16 opcode = __le16_to_cpu(sent->opcode);
2598
2599 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2600 } else {
2601 BT_ERR("%s command tx timeout", hdev->name);
2602 }
2603
6bd32326 2604 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2605 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2606}
2607
2763eda6 2608struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2609 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2610{
2611 struct oob_data *data;
2612
6928a924
JH
2613 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2614 if (bacmp(bdaddr, &data->bdaddr) != 0)
2615 continue;
2616 if (data->bdaddr_type != bdaddr_type)
2617 continue;
2618 return data;
2619 }
2763eda6
SJ
2620
2621 return NULL;
2622}
2623
6928a924
JH
2624int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2625 u8 bdaddr_type)
2763eda6
SJ
2626{
2627 struct oob_data *data;
2628
6928a924 2629 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2630 if (!data)
2631 return -ENOENT;
2632
6928a924 2633 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2634
2635 list_del(&data->list);
2636 kfree(data);
2637
2638 return 0;
2639}
2640
35f7498a 2641void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2642{
2643 struct oob_data *data, *n;
2644
2645 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2646 list_del(&data->list);
2647 kfree(data);
2648 }
2763eda6
SJ
2649}
2650
0798872e 2651int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2652 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2653 u8 *hash256, u8 *rand256)
2763eda6
SJ
2654{
2655 struct oob_data *data;
2656
6928a924 2657 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2658 if (!data) {
0a14ab41 2659 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2660 if (!data)
2661 return -ENOMEM;
2662
2663 bacpy(&data->bdaddr, bdaddr);
6928a924 2664 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2665 list_add(&data->list, &hdev->remote_oob_data);
2666 }
2667
81328d5c
JH
2668 if (hash192 && rand192) {
2669 memcpy(data->hash192, hash192, sizeof(data->hash192));
2670 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2671 if (hash256 && rand256)
2672 data->present = 0x03;
81328d5c
JH
2673 } else {
2674 memset(data->hash192, 0, sizeof(data->hash192));
2675 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2676 if (hash256 && rand256)
2677 data->present = 0x02;
2678 else
2679 data->present = 0x00;
0798872e
MH
2680 }
2681
81328d5c
JH
2682 if (hash256 && rand256) {
2683 memcpy(data->hash256, hash256, sizeof(data->hash256));
2684 memcpy(data->rand256, rand256, sizeof(data->rand256));
2685 } else {
2686 memset(data->hash256, 0, sizeof(data->hash256));
2687 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2688 if (hash192 && rand192)
2689 data->present = 0x01;
81328d5c 2690 }
0798872e 2691
6ed93dc6 2692 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2693
2694 return 0;
2695}
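/* Annotation (editorial): the branches above encode which OOB blocks
 * are valid in data->present as a two-bit mask:
 *
 *   0x00  neither pair valid
 *   0x01  P-192 hash/rand only
 *   0x02  P-256 hash/rand only
 *   0x03  both P-192 and P-256 valid
 */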
2696
d2609b34
FG
2697/* This function requires the caller holds hdev->lock */
2698struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2699{
2700 struct adv_info *adv_instance;
2701
2702 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2703 if (adv_instance->instance == instance)
2704 return adv_instance;
2705 }
2706
2707 return NULL;
2708}
2709
2710/* This function requires the caller holds hdev->lock */
2711struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2712 struct adv_info *cur_instance;
2713
2714 cur_instance = hci_find_adv_instance(hdev, instance);
2715 if (!cur_instance)
2716 return NULL;
2717
2718 if (cur_instance == list_last_entry(&hdev->adv_instances,
2719 struct adv_info, list))
2720 return list_first_entry(&hdev->adv_instances,
2721 struct adv_info, list);
2722 else
2723 return list_next_entry(cur_instance, list);
2724}
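/* Annotation (editorial): hci_get_next_instance() treats the
 * hdev->adv_instances list as a ring, wrapping from the last entry
 * back to the first, so callers can rotate through advertising
 * instances round-robin.
 */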
2725
2726/* This function requires the caller holds hdev->lock */
2727int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2728{
2729 struct adv_info *adv_instance;
2730
2731 adv_instance = hci_find_adv_instance(hdev, instance);
2732 if (!adv_instance)
2733 return -ENOENT;
2734
2735 BT_DBG("%s removing instance %d", hdev->name, instance);
2736
5d900e46
FG
2737 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2738 cancel_delayed_work(&hdev->adv_instance_expire);
2739 hdev->adv_instance_timeout = 0;
2740 }
2741
d2609b34
FG
2742 list_del(&adv_instance->list);
2743 kfree(adv_instance);
2744
2745 hdev->adv_instance_cnt--;
2746
2747 return 0;
2748}
2749
2750/* This function requires the caller holds hdev->lock */
2751void hci_adv_instances_clear(struct hci_dev *hdev)
2752{
2753 struct adv_info *adv_instance, *n;
2754
5d900e46
FG
2755 if (hdev->adv_instance_timeout) {
2756 cancel_delayed_work(&hdev->adv_instance_expire);
2757 hdev->adv_instance_timeout = 0;
2758 }
2759
d2609b34
FG
2760 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2761 list_del(&adv_instance->list);
2762 kfree(adv_instance);
2763 }
2764
2765 hdev->adv_instance_cnt = 0;
2766}
2767
2768/* This function requires the caller holds hdev->lock */
2769int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2770 u16 adv_data_len, u8 *adv_data,
2771 u16 scan_rsp_len, u8 *scan_rsp_data,
2772 u16 timeout, u16 duration)
2773{
2774 struct adv_info *adv_instance;
2775
2776 adv_instance = hci_find_adv_instance(hdev, instance);
2777 if (adv_instance) {
2778 memset(adv_instance->adv_data, 0,
2779 sizeof(adv_instance->adv_data));
2780 memset(adv_instance->scan_rsp_data, 0,
2781 sizeof(adv_instance->scan_rsp_data));
2782 } else {
2783 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2784 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2785 return -EOVERFLOW;
2786
39ecfad6 2787 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2788 if (!adv_instance)
2789 return -ENOMEM;
2790
fffd38bc 2791 adv_instance->pending = true;
d2609b34
FG
2792 adv_instance->instance = instance;
2793 list_add(&adv_instance->list, &hdev->adv_instances);
2794 hdev->adv_instance_cnt++;
2795 }
2796
2797 adv_instance->flags = flags;
2798 adv_instance->adv_data_len = adv_data_len;
2799 adv_instance->scan_rsp_len = scan_rsp_len;
2800
2801 if (adv_data_len)
2802 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2803
2804 if (scan_rsp_len)
2805 memcpy(adv_instance->scan_rsp_data,
2806 scan_rsp_data, scan_rsp_len);
2807
2808 adv_instance->timeout = timeout;
5d900e46 2809 adv_instance->remaining_time = timeout;
d2609b34
FG
2810
2811 if (duration == 0)
2812 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2813 else
2814 adv_instance->duration = duration;
2815
2816 BT_DBG("%s for instance %d", hdev->name, instance);
2817
2818 return 0;
2819}
2820
dcc36c16 2821struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2822 bdaddr_t *bdaddr, u8 type)
b2a66aad 2823{
8035ded4 2824 struct bdaddr_list *b;
b2a66aad 2825
dcc36c16 2826 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2827 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2828 return b;
b9ee0a78 2829 }
b2a66aad
AJ
2830
2831 return NULL;
2832}
2833
dcc36c16 2834void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
2835{
2836 struct list_head *p, *n;
2837
dcc36c16 2838 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 2839 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2840
2841 list_del(p);
2842 kfree(b);
2843 }
b2a66aad
AJ
2844}
2845
dcc36c16 2846int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2847{
2848 struct bdaddr_list *entry;
b2a66aad 2849
b9ee0a78 2850 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2851 return -EBADF;
2852
dcc36c16 2853 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2854 return -EEXIST;
b2a66aad 2855
27f70f3e 2856 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2857 if (!entry)
2858 return -ENOMEM;
b2a66aad
AJ
2859
2860 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2861 entry->bdaddr_type = type;
b2a66aad 2862
dcc36c16 2863 list_add(&entry->list, list);
b2a66aad 2864
2a8357f2 2865 return 0;
b2a66aad
AJ
2866}
2867
dcc36c16 2868int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2869{
2870 struct bdaddr_list *entry;
b2a66aad 2871
35f7498a 2872 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2873 hci_bdaddr_list_clear(list);
35f7498a
JH
2874 return 0;
2875 }
b2a66aad 2876
dcc36c16 2877 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2878 if (!entry)
2879 return -ENOENT;
2880
2881 list_del(&entry->list);
2882 kfree(entry);
2883
2884 return 0;
2885}
2886
15819a70
AG
2887/* This function requires the caller holds hdev->lock */
2888struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2889 bdaddr_t *addr, u8 addr_type)
2890{
2891 struct hci_conn_params *params;
2892
2893 list_for_each_entry(params, &hdev->le_conn_params, list) {
2894 if (bacmp(&params->addr, addr) == 0 &&
2895 params->addr_type == addr_type) {
2896 return params;
2897 }
2898 }
2899
2900 return NULL;
2901}
2902
4b10966f 2903/* This function requires the caller holds hdev->lock */
501f8827
JH
2904struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2905 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2906{
912b42ef 2907 struct hci_conn_params *param;
a9b0a04c 2908
501f8827 2909 list_for_each_entry(param, list, action) {
912b42ef
JH
2910 if (bacmp(&param->addr, addr) == 0 &&
2911 param->addr_type == addr_type)
2912 return param;
4b10966f
MH
2913 }
2914
2915 return NULL;
a9b0a04c
AG
2916}
2917
f75113a2
JP
2918/* This function requires the caller holds hdev->lock */
2919struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2920 bdaddr_t *addr,
2921 u8 addr_type)
2922{
2923 struct hci_conn_params *param;
2924
2925 list_for_each_entry(param, &hdev->pend_le_conns, action) {
2926 if (bacmp(&param->addr, addr) == 0 &&
2927 param->addr_type == addr_type &&
2928 param->explicit_connect)
2929 return param;
2930 }
2931
2932 list_for_each_entry(param, &hdev->pend_le_reports, action) {
2933 if (bacmp(&param->addr, addr) == 0 &&
2934 param->addr_type == addr_type &&
2935 param->explicit_connect)
2936 return param;
2937 }
2938
2939 return NULL;
2940}
2941
15819a70 2942/* This function requires the caller holds hdev->lock */
51d167c0
MH
2943struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2944 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2945{
2946 struct hci_conn_params *params;
2947
2948 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2949 if (params)
51d167c0 2950 return params;
15819a70
AG
2951
2952 params = kzalloc(sizeof(*params), GFP_KERNEL);
2953 if (!params) {
2954 BT_ERR("Out of memory");
51d167c0 2955 return NULL;
15819a70
AG
2956 }
2957
2958 bacpy(&params->addr, addr);
2959 params->addr_type = addr_type;
cef952ce
AG
2960
2961 list_add(&params->list, &hdev->le_conn_params);
93450c75 2962 INIT_LIST_HEAD(&params->action);
cef952ce 2963
bf5b3c8b
MH
2964 params->conn_min_interval = hdev->le_conn_min_interval;
2965 params->conn_max_interval = hdev->le_conn_max_interval;
2966 params->conn_latency = hdev->le_conn_latency;
2967 params->supervision_timeout = hdev->le_supv_timeout;
2968 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2969
2970 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2971
51d167c0 2972 return params;
bf5b3c8b
MH
2973}
2974
f6c63249 2975static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2976{
f8aaf9b6 2977 if (params->conn) {
f161dd41 2978 hci_conn_drop(params->conn);
f8aaf9b6
JH
2979 hci_conn_put(params->conn);
2980 }
f161dd41 2981
95305baa 2982 list_del(&params->action);
15819a70
AG
2983 list_del(&params->list);
2984 kfree(params);
f6c63249
JH
2985}
2986
2987/* This function requires the caller holds hdev->lock */
2988void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2989{
2990 struct hci_conn_params *params;
2991
2992 params = hci_conn_params_lookup(hdev, addr, addr_type);
2993 if (!params)
2994 return;
2995
2996 hci_conn_params_free(params);
15819a70 2997
95305baa
JH
2998 hci_update_background_scan(hdev);
2999
15819a70
AG
3000 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3001}
3002
3003/* This function requires the caller holds hdev->lock */
55af49a8 3004void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3005{
3006 struct hci_conn_params *params, *tmp;
3007
3008 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3009 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3010 continue;
f75113a2
JP
3011
3012 /* If we are trying to establish a one-time connection to a
3013 * disabled device, leave the params but mark them as one-shot.
3014 */
3015 if (params->explicit_connect) {
3016 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3017 continue;
3018 }
3019
15819a70
AG
3020 list_del(&params->list);
3021 kfree(params);
3022 }
3023
55af49a8 3024 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3025}
3026
3027/* This function requires the caller holds hdev->lock */
373110c5 3028void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3029{
15819a70 3030 struct hci_conn_params *params, *tmp;
77a77a30 3031
f6c63249
JH
3032 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3033 hci_conn_params_free(params);
77a77a30 3034
a4790dbd 3035 hci_update_background_scan(hdev);
77a77a30 3036
15819a70 3037 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3038}
3039
1904a853 3040static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7ba8b4be 3041{
4c87eaab
AG
3042 if (status) {
3043 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3044
4c87eaab
AG
3045 hci_dev_lock(hdev);
3046 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3047 hci_dev_unlock(hdev);
3048 return;
3049 }
7ba8b4be
AG
3050}
3051
1904a853
MH
3052static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3053 u16 opcode)
7ba8b4be 3054{
4c87eaab
AG
3055 /* General inquiry access code (GIAC) */
3056 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4c87eaab 3057 struct hci_cp_inquiry cp;
7ba8b4be
AG
3058 int err;
3059
4c87eaab
AG
3060 if (status) {
3061 BT_ERR("Failed to disable LE scanning: status %d", status);
3062 return;
3063 }
7ba8b4be 3064
2d28cfe7
JP
3065 hdev->discovery.scan_start = 0;
3066
4c87eaab
AG
3067 switch (hdev->discovery.type) {
3068 case DISCOV_TYPE_LE:
3069 hci_dev_lock(hdev);
3070 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3071 hci_dev_unlock(hdev);
3072 break;
7ba8b4be 3073
4c87eaab 3074 case DISCOV_TYPE_INTERLEAVED:
4c87eaab 3075 hci_dev_lock(hdev);
7dbfac1d 3076
07d2334a
JP
3077 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3078 &hdev->quirks)) {
3079 /* If we were running LE only scan, change discovery
3080 * state. If we were running both LE and BR/EDR inquiry
3081 * simultaneously, and BR/EDR inquiry is already
3082 * finished, stop discovery, otherwise BR/EDR inquiry
177d0506
WK
3083 * will stop discovery when finished. If we are resolving
3084 * a remote device name, do not change the discovery state.
07d2334a 3085 */
177d0506
WK
3086 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3087 hdev->discovery.state != DISCOVERY_RESOLVING)
07d2334a
JP
3088 hci_discovery_set_state(hdev,
3089 DISCOVERY_STOPPED);
3090 } else {
baf880a9
JH
3091 struct hci_request req;
3092
07d2334a
JP
3093 hci_inquiry_cache_flush(hdev);
3094
baf880a9
JH
3095 hci_req_init(&req, hdev);
3096
3097 memset(&cp, 0, sizeof(cp));
3098 memcpy(&cp.lap, lap, sizeof(cp.lap));
3099 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3100 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3101
07d2334a
JP
3102 err = hci_req_run(&req, inquiry_complete);
3103 if (err) {
3104 BT_ERR("Inquiry request failed: err %d", err);
3105 hci_discovery_set_state(hdev,
3106 DISCOVERY_STOPPED);
3107 }
4c87eaab 3108 }
7dbfac1d 3109
4c87eaab
AG
3110 hci_dev_unlock(hdev);
3111 break;
7dbfac1d 3112 }
7dbfac1d
AG
3113}
3114
7ba8b4be
AG
3115static void le_scan_disable_work(struct work_struct *work)
3116{
3117 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3118 le_scan_disable.work);
4c87eaab
AG
3119 struct hci_request req;
3120 int err;
7ba8b4be
AG
3121
3122 BT_DBG("%s", hdev->name);
3123
2d28cfe7
JP
3124 cancel_delayed_work_sync(&hdev->le_scan_restart);
3125
4c87eaab 3126 hci_req_init(&req, hdev);
28b75a89 3127
b1efcc28 3128 hci_req_add_le_scan_disable(&req);
28b75a89 3129
4c87eaab
AG
3130 err = hci_req_run(&req, le_scan_disable_work_complete);
3131 if (err)
3132 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3133}
3134
2d28cfe7
JP
3135static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3136 u16 opcode)
3137{
3138 unsigned long timeout, duration, scan_start, now;
3139
3140 BT_DBG("%s", hdev->name);
3141
3142 if (status) {
3143 BT_ERR("Failed to restart LE scan: status %d", status);
3144 return;
3145 }
3146
3147 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3148 !hdev->discovery.scan_start)
3149 return;
3150
3151 /* When the scan was started, hdev->le_scan_disable has been queued
3152 * after duration from scan_start. During scan restart this job
3153 * has been canceled, and we need to queue it again after proper
3154 * timeout, to make sure that scan does not run indefinitely.
3155 */
3156 duration = hdev->discovery.scan_duration;
3157 scan_start = hdev->discovery.scan_start;
3158 now = jiffies;
3159 if (now - scan_start <= duration) {
3160 int elapsed;
3161
3162 if (now >= scan_start)
3163 elapsed = now - scan_start;
3164 else
3165 elapsed = ULONG_MAX - scan_start + now;
3166
3167 timeout = duration - elapsed;
3168 } else {
3169 timeout = 0;
3170 }
3171 queue_delayed_work(hdev->workqueue,
3172 &hdev->le_scan_disable, timeout);
3173}
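/* Worked example (editorial annotation): with a scan duration of 10 s
 * worth of jiffies and a restart happening 4 s after scan_start,
 * elapsed = 4 s and the disable work is re-queued with a 6 s timeout.
 * The inner else handles jiffies wrapping between scan_start and now,
 * and an already-expired duration yields timeout = 0 so the disable
 * work runs immediately.
 */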
3174
3175static void le_scan_restart_work(struct work_struct *work)
3176{
3177 struct hci_dev *hdev = container_of(work, struct hci_dev,
3178 le_scan_restart.work);
3179 struct hci_request req;
3180 struct hci_cp_le_set_scan_enable cp;
3181 int err;
3182
3183 BT_DBG("%s", hdev->name);
3184
3185 /* If controller is not scanning we are done. */
d7a5a11d 3186 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2d28cfe7
JP
3187 return;
3188
3189 hci_req_init(&req, hdev);
3190
3191 hci_req_add_le_scan_disable(&req);
3192
3193 memset(&cp, 0, sizeof(cp));
3194 cp.enable = LE_SCAN_ENABLE;
3195 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3196 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3197
3198 err = hci_req_run(&req, le_scan_restart_work_complete);
3199 if (err)
3200 BT_ERR("Restart LE scan request failed: err %d", err);
3201}
3202
a1f4c318
JH
3203/* Copy the Identity Address of the controller.
3204 *
3205 * If the controller has a public BD_ADDR, then by default use that one.
3206 * If this is a LE only controller without a public address, default to
3207 * the static random address.
3208 *
3209 * For debugging purposes it is possible to force controllers with a
3210 * public address to use the static random address instead.
50b5b952
MH
3211 *
3212 * In case BR/EDR has been disabled on a dual-mode controller and
3213 * userspace has configured a static address, then that address
3214 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3215 */
3216void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217 u8 *bdaddr_type)
3218{
b7cb93e5 3219 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3220 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3221 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3222 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3223 bacpy(bdaddr, &hdev->static_addr);
3224 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3225 } else {
3226 bacpy(bdaddr, &hdev->bdaddr);
3227 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3228 }
3229}
3230
9be0dab7
DH
3231/* Alloc HCI device */
3232struct hci_dev *hci_alloc_dev(void)
3233{
3234 struct hci_dev *hdev;
3235
27f70f3e 3236 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3237 if (!hdev)
3238 return NULL;
3239
b1b813d4
DH
3240 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3241 hdev->esco_type = (ESCO_HV1);
3242 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3243 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3244 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3245 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3246 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3247 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3248 hdev->adv_instance_cnt = 0;
3249 hdev->cur_adv_instance = 0x00;
5d900e46 3250 hdev->adv_instance_timeout = 0;
b1b813d4 3251
b1b813d4
DH
3252 hdev->sniff_max_interval = 800;
3253 hdev->sniff_min_interval = 80;
3254
3f959d46 3255 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3256 hdev->le_adv_min_interval = 0x0800;
3257 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3258 hdev->le_scan_interval = 0x0060;
3259 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3260 hdev->le_conn_min_interval = 0x0028;
3261 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3262 hdev->le_conn_latency = 0x0000;
3263 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3264 hdev->le_def_tx_len = 0x001b;
3265 hdev->le_def_tx_time = 0x0148;
3266 hdev->le_max_tx_len = 0x001b;
3267 hdev->le_max_tx_time = 0x0148;
3268 hdev->le_max_rx_len = 0x001b;
3269 hdev->le_max_rx_time = 0x0148;
bef64738 3270
d6bfd59c 3271 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3272 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3273 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3274 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3275
b1b813d4
DH
3276 mutex_init(&hdev->lock);
3277 mutex_init(&hdev->req_lock);
3278
3279 INIT_LIST_HEAD(&hdev->mgmt_pending);
3280 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3281 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3282 INIT_LIST_HEAD(&hdev->uuids);
3283 INIT_LIST_HEAD(&hdev->link_keys);
3284 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3285 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3286 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3287 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3288 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3289 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3290 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3291 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3292 INIT_LIST_HEAD(&hdev->adv_instances);
b1b813d4
DH
3293
3294 INIT_WORK(&hdev->rx_work, hci_rx_work);
3295 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3296 INIT_WORK(&hdev->tx_work, hci_tx_work);
3297 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3298 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 3299
b1b813d4
DH
3300 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3301 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3302 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2d28cfe7 3303 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
5d900e46 3304 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
b1b813d4 3305
b1b813d4
DH
3306 skb_queue_head_init(&hdev->rx_q);
3307 skb_queue_head_init(&hdev->cmd_q);
3308 skb_queue_head_init(&hdev->raw_q);
3309
3310 init_waitqueue_head(&hdev->req_wait_q);
3311
65cc2b49 3312 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3313
b1b813d4
DH
3314 hci_init_sysfs(hdev);
3315 discovery_init(hdev);
9be0dab7
DH
3316
3317 return hdev;
3318}
3319EXPORT_SYMBOL(hci_alloc_dev);
3320
3321/* Free HCI device */
3322void hci_free_dev(struct hci_dev *hdev)
3323{
9be0dab7
DH
3324 /* will free via device release */
3325 put_device(&hdev->dev);
3326}
3327EXPORT_SYMBOL(hci_free_dev);
3328
1da177e4
LT
3329/* Register HCI device */
3330int hci_register_dev(struct hci_dev *hdev)
3331{
b1b813d4 3332 int id, error;
1da177e4 3333
74292d5a 3334 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3335 return -EINVAL;
3336
08add513
MM
3337 /* Do not allow HCI_AMP devices to register at index 0,
3338 * so the index can be used as the AMP controller ID.
3339 */
3df92b31
SL
3340 switch (hdev->dev_type) {
3341 case HCI_BREDR:
3342 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3343 break;
3344 case HCI_AMP:
3345 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3346 break;
3347 default:
3348 return -EINVAL;
1da177e4 3349 }
8e87d142 3350
3df92b31
SL
3351 if (id < 0)
3352 return id;
3353
1da177e4
LT
3354 sprintf(hdev->name, "hci%d", id);
3355 hdev->id = id;
2d8b3a11
AE
3356
3357 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3358
d8537548
KC
3359 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3360 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3361 if (!hdev->workqueue) {
3362 error = -ENOMEM;
3363 goto err;
3364 }
f48fd9c8 3365
d8537548
KC
3366 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3367 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3368 if (!hdev->req_workqueue) {
3369 destroy_workqueue(hdev->workqueue);
3370 error = -ENOMEM;
3371 goto err;
3372 }
3373
0153e2ec
MH
3374 if (!IS_ERR_OR_NULL(bt_debugfs))
3375 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3376
bdc3e0f1
MH
3377 dev_set_name(&hdev->dev, "%s", hdev->name);
3378
3379 error = device_add(&hdev->dev);
33ca954d 3380 if (error < 0)
54506918 3381 goto err_wqueue;
1da177e4 3382
611b30f7 3383 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3384 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3385 hdev);
611b30f7
MH
3386 if (hdev->rfkill) {
3387 if (rfkill_register(hdev->rfkill) < 0) {
3388 rfkill_destroy(hdev->rfkill);
3389 hdev->rfkill = NULL;
3390 }
3391 }
3392
5e130367 3393 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3394 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3395
a1536da2
MH
3396 hci_dev_set_flag(hdev, HCI_SETUP);
3397 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3398
01cd3404 3399 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3400 /* Assume BR/EDR support until proven otherwise (such as
3401 * through reading supported features during init).
3402 */
a1536da2 3403 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3404 }
ce2be9ac 3405
fcee3377
GP
3406 write_lock(&hci_dev_list_lock);
3407 list_add(&hdev->list, &hci_dev_list);
3408 write_unlock(&hci_dev_list_lock);
3409
4a964404
MH
3410 /* Devices that are marked for raw-only usage are unconfigured
3411 * and should not be included in normal operation.
fee746b0
MH
3412 */
3413 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3414 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3415
1da177e4 3416 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3417 hci_dev_hold(hdev);
1da177e4 3418
19202573 3419 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3420
1da177e4 3421 return id;
f48fd9c8 3422
33ca954d
DH
3423err_wqueue:
3424 destroy_workqueue(hdev->workqueue);
6ead1bbc 3425 destroy_workqueue(hdev->req_workqueue);
33ca954d 3426err:
3df92b31 3427 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3428
33ca954d 3429 return error;
1da177e4
LT
3430}
3431EXPORT_SYMBOL(hci_register_dev);
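/* A minimal sketch (editorial, not from this file) of the driver side
 * of the API above: hci_register_dev() rejects a hdev without
 * open/close/send, so a hypothetical foo transport wires those up
 * before registering. Real drivers (e.g. btusb) also set driver data
 * and optional callbacks.
 *
 * static int foo_open(struct hci_dev *hdev) { return 0; }
 * static int foo_close(struct hci_dev *hdev) { return 0; }
 *
 * static int foo_send(struct hci_dev *hdev, struct sk_buff *skb)
 * {
 *	// a real driver would queue the skb to its hardware here
 *	kfree_skb(skb);
 *	return 0;
 * }
 *
 * static int foo_probe(void)
 * {
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send = foo_send;
 *
 *	err = hci_register_dev(hdev);	// returns the new id or -errno
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 *
 *	return 0;
 * }
 */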
3432
3433/* Unregister HCI device */
59735631 3434void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3435{
2d7cc19e 3436 int id;
ef222013 3437
c13854ce 3438 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3439
a1536da2 3440 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3441
3df92b31
SL
3442 id = hdev->id;
3443
f20d09d5 3444 write_lock(&hci_dev_list_lock);
1da177e4 3445 list_del(&hdev->list);
f20d09d5 3446 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3447
3448 hci_dev_do_close(hdev);
3449
b9b5ef18
GP
3450 cancel_work_sync(&hdev->power_on);
3451
ab81cbf9 3452 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3453 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3454 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3455 hci_dev_lock(hdev);
744cf19e 3456 mgmt_index_removed(hdev);
09fd0de5 3457 hci_dev_unlock(hdev);
56e5cb86 3458 }
ab81cbf9 3459
2e58ef3e
JH
3460 /* mgmt_index_removed should take care of emptying the
3461 * pending list */
3462 BUG_ON(!list_empty(&hdev->mgmt_pending));
3463
1da177e4
LT
3464 hci_notify(hdev, HCI_DEV_UNREG);
3465
611b30f7
MH
3466 if (hdev->rfkill) {
3467 rfkill_unregister(hdev->rfkill);
3468 rfkill_destroy(hdev->rfkill);
3469 }
3470
bdc3e0f1 3471 device_del(&hdev->dev);
147e2d59 3472
0153e2ec
MH
3473 debugfs_remove_recursive(hdev->debugfs);
3474
f48fd9c8 3475 destroy_workqueue(hdev->workqueue);
6ead1bbc 3476 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3477
09fd0de5 3478 hci_dev_lock(hdev);
dcc36c16 3479 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3480 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3481 hci_uuids_clear(hdev);
55ed8ca1 3482 hci_link_keys_clear(hdev);
b899efaf 3483 hci_smp_ltks_clear(hdev);
970c4e46 3484 hci_smp_irks_clear(hdev);
2763eda6 3485 hci_remote_oob_data_clear(hdev);
d2609b34 3486 hci_adv_instances_clear(hdev);
dcc36c16 3487 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 3488 hci_conn_params_clear_all(hdev);
22078800 3489 hci_discovery_filter_clear(hdev);
09fd0de5 3490 hci_dev_unlock(hdev);
e2e0cacb 3491
dc946bd8 3492 hci_dev_put(hdev);
3df92b31
SL
3493
3494 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3495}
3496EXPORT_SYMBOL(hci_unregister_dev);
3497
3498/* Suspend HCI device */
3499int hci_suspend_dev(struct hci_dev *hdev)
3500{
3501 hci_notify(hdev, HCI_DEV_SUSPEND);
3502 return 0;
3503}
3504EXPORT_SYMBOL(hci_suspend_dev);
3505
3506/* Resume HCI device */
3507int hci_resume_dev(struct hci_dev *hdev)
3508{
3509 hci_notify(hdev, HCI_DEV_RESUME);
3510 return 0;
3511}
3512EXPORT_SYMBOL(hci_resume_dev);
3513
75e0569f
MH
3514/* Reset HCI device */
3515int hci_reset_dev(struct hci_dev *hdev)
3516{
3517 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3518 struct sk_buff *skb;
3519
3520 skb = bt_skb_alloc(3, GFP_ATOMIC);
3521 if (!skb)
3522 return -ENOMEM;
3523
3524 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3525 memcpy(skb_put(skb, 3), hw_err, 3);
3526
3527 /* Send Hardware Error to upper stack */
3528 return hci_recv_frame(hdev, skb);
3529}
3530EXPORT_SYMBOL(hci_reset_dev);
3531
76bca880 3532/* Receive frame from HCI drivers */
e1a26170 3533int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3534{
76bca880 3535 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3536 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3537 kfree_skb(skb);
3538 return -ENXIO;
3539 }
3540
fe806dce
MH
3541 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3542 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3543 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3544 kfree_skb(skb);
3545 return -EINVAL;
3546 }
3547
d82603c6 3548 /* Incoming skb */
76bca880
MH
3549 bt_cb(skb)->incoming = 1;
3550
3551 /* Time stamp */
3552 __net_timestamp(skb);
3553
76bca880 3554 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3555 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3556
76bca880
MH
3557 return 0;
3558}
3559EXPORT_SYMBOL(hci_recv_frame);
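/* Driver-side sketch (editorial annotation): since hci_recv_frame()
 * now rejects anything other than event, ACL and SCO packets, a
 * transport driver must label each skb before handing it up, e.g. for
 * raw event bytes received from hardware (foo_recv_event is a
 * hypothetical helper, following the same pattern as hci_reset_dev()
 * above):
 *
 * static int foo_recv_event(struct hci_dev *hdev, const void *buf, int len)
 * {
 *	struct sk_buff *skb;
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	// mandatory packet type
 *	memcpy(skb_put(skb, len), buf, len);
 *
 *	return hci_recv_frame(hdev, skb);	// queues to hdev->rx_q
 * }
 */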
3560
e875ff84
MH
3561/* Receive diagnostic message from HCI drivers */
3562int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3563{
3564 /* Time stamp */
3565 __net_timestamp(skb);
3566
3567 /* Mark as diagnostic packet and send to monitor */
3568 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3569 hci_send_to_monitor(hdev, skb);
3570
3571 kfree_skb(skb);
3572 return 0;
3573}
3574EXPORT_SYMBOL(hci_recv_diag);
3575
1da177e4
LT
3576/* ---- Interface to upper protocols ---- */
3577
1da177e4
LT
3578int hci_register_cb(struct hci_cb *cb)
3579{
3580 BT_DBG("%p name %s", cb, cb->name);
3581
fba7ecf0 3582 mutex_lock(&hci_cb_list_lock);
00629e0f 3583 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3584 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3585
3586 return 0;
3587}
3588EXPORT_SYMBOL(hci_register_cb);
3589
3590int hci_unregister_cb(struct hci_cb *cb)
3591{
3592 BT_DBG("%p name %s", cb, cb->name);
3593
fba7ecf0 3594 mutex_lock(&hci_cb_list_lock);
1da177e4 3595 list_del(&cb->list);
fba7ecf0 3596 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3597
3598 return 0;
3599}
3600EXPORT_SYMBOL(hci_unregister_cb);
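/* Registration sketch: how an upper protocol hooks into connection
 * events, as the L2CAP and SCO layers do. The handler body and
 * names are placeholders; the struct hci_cb members are assumed to
 * match hci_core.h of this tree.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
	BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
};

/* Paired calls, typically from module init/exit:
 *	hci_register_cb(&example_cb);
 *	hci_unregister_cb(&example_cb);
 */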
3601
51086991 3602static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3603{
cdc52faa
MH
3604 int err;
3605
0d48d939 3606 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3607
cd82e61c
MH
3608 /* Time stamp */
3609 __net_timestamp(skb);
1da177e4 3610
cd82e61c
MH
3611 /* Send copy to monitor */
3612 hci_send_to_monitor(hdev, skb);
3613
3614 if (atomic_read(&hdev->promisc)) {
3615 /* Send copy to the sockets */
470fe1b5 3616 hci_send_to_sock(hdev, skb);
1da177e4
LT
3617 }
3618
3619 /* Get rid of skb owner prior to sending to the driver. */
3620 skb_orphan(skb);
3621
73d0d3c8
MH
3622 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3623 kfree_skb(skb);
3624 return;
3625 }
3626
cdc52faa
MH
3627 err = hdev->send(hdev, skb);
3628 if (err < 0) {
3629 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3630 kfree_skb(skb);
3631 }
1da177e4
LT
3632}
3633
1ca3a9d0 3634/* Send HCI command */
07dc93dd
JH
3635int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3636 const void *param)
1ca3a9d0
JH
3637{
3638 struct sk_buff *skb;
3639
3640 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3641
3642 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3643 if (!skb) {
3644 BT_ERR("%s no memory for command", hdev->name);
3645 return -ENOMEM;
3646 }
3647
49c922bb 3648 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3649 * single-command requests.
3650 */
db6e3e8d 3651 bt_cb(skb)->req.start = true;
11714b3d 3652
1da177e4 3653 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3654 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3655
3656 return 0;
3657}
1da177e4
LT
3658
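/* Usage sketch for the asynchronous path above: queue one command
 * and return; the result arrives later via the matching Command
 * Complete/Status event. The opcode and SCAN_PAGE come from hci.h;
 * the wrapper function itself is illustrative only.
 */
static int example_enable_page_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}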
3659/* Get data from the previously sent command */
a9de9248 3660void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3661{
3662 struct hci_command_hdr *hdr;
3663
3664 if (!hdev->sent_cmd)
3665 return NULL;
3666
3667 hdr = (void *) hdev->sent_cmd->data;
3668
a9de9248 3669 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3670 return NULL;
3671
f0e09510 3672 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3673
3674 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3675}
3676
fbef168f
LP
3677 /* Send HCI command and wait for command complete event */
3678struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3679 const void *param, u32 timeout)
3680{
3681 struct sk_buff *skb;
3682
3683 if (!test_bit(HCI_UP, &hdev->flags))
3684 return ERR_PTR(-ENETDOWN);
3685
3686 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3687
3688 hci_req_lock(hdev);
3689 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3690 hci_req_unlock(hdev);
3691
3692 return skb;
3693}
3694EXPORT_SYMBOL(hci_cmd_sync);
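/* Usage sketch for the synchronous variant: callable only from
 * sleepable context, since it takes req_lock and waits. The
 * opcode, timeout constant and reply struct exist in hci.h; the
 * wrapper is illustrative. Note the ERR_PTR convention on failure.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data now holds struct hci_rp_read_local_version */
	kfree_skb(skb);
	return 0;
}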
3695
1da177e4
LT
3696/* Send ACL data */
3697static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3698{
3699 struct hci_acl_hdr *hdr;
3700 int len = skb->len;
3701
badff6d0
ACM
3702 skb_push(skb, HCI_ACL_HDR_SIZE);
3703 skb_reset_transport_header(skb);
9c70220b 3704 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3705 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3706 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3707}
3708
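/* Worked example of the packing below, using the helpers from
 * hci.h: the connection handle occupies the low 12 bits and the
 * packet boundary/broadcast flags the high 4 bits, so
 *
 *	hci_handle_pack(0x002a, ACL_START) == 0x202a
 *	hci_handle(0x202a) == 0x002a
 *	hci_flags(0x202a)  == ACL_START
 */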
ee22be7e 3709static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3710 struct sk_buff *skb, __u16 flags)
1da177e4 3711{
ee22be7e 3712 struct hci_conn *conn = chan->conn;
1da177e4
LT
3713 struct hci_dev *hdev = conn->hdev;
3714 struct sk_buff *list;
3715
087bfd99
GP
3716 skb->len = skb_headlen(skb);
3717 skb->data_len = 0;
3718
3719 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3720
3721 switch (hdev->dev_type) {
3722 case HCI_BREDR:
3723 hci_add_acl_hdr(skb, conn->handle, flags);
3724 break;
3725 case HCI_AMP:
3726 hci_add_acl_hdr(skb, chan->handle, flags);
3727 break;
3728 default:
3729 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3730 return;
3731 }
087bfd99 3732
70f23020
AE
3733 list = skb_shinfo(skb)->frag_list;
3734 if (!list) {
1da177e4
LT
3735 /* Non-fragmented */
3736 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3737
73d80deb 3738 skb_queue_tail(queue, skb);
1da177e4
LT
3739 } else {
3740 /* Fragmented */
3741 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3742
3743 skb_shinfo(skb)->frag_list = NULL;
3744
9cfd5a23
JR
3745 /* Queue all fragments atomically. We need spin_lock_bh here
3746 * because with 6LoWPAN links this function can be called from
3747 * softirq context, where taking a plain spin lock could
3748 * deadlock.
3749 */
3750 spin_lock_bh(&queue->lock);
1da177e4 3751
73d80deb 3752 __skb_queue_tail(queue, skb);
e702112f
AE
3753
3754 flags &= ~ACL_START;
3755 flags |= ACL_CONT;
1da177e4
LT
3756 do {
3757 skb = list; list = list->next;
8e87d142 3758
0d48d939 3759 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3760 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3761
3762 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3763
73d80deb 3764 __skb_queue_tail(queue, skb);
1da177e4
LT
3765 } while (list);
3766
9cfd5a23 3767 spin_unlock_bh(&queue->lock);
1da177e4 3768 }
73d80deb
LAD
3769}
3770
3771void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3772{
ee22be7e 3773 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3774
f0e09510 3775 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3776
ee22be7e 3777 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3778
3eff45ea 3779 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3780}
1da177e4
LT
3781
3782/* Send SCO data */
0d861d8b 3783void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3784{
3785 struct hci_dev *hdev = conn->hdev;
3786 struct hci_sco_hdr hdr;
3787
3788 BT_DBG("%s len %d", hdev->name, skb->len);
3789
aca3192c 3790 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3791 hdr.dlen = skb->len;
3792
badff6d0
ACM
3793 skb_push(skb, HCI_SCO_HDR_SIZE);
3794 skb_reset_transport_header(skb);
9c70220b 3795 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3796
0d48d939 3797 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3798
1da177e4 3799 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3800 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3801}
1da177e4
LT
3802
3803/* ---- HCI TX task (outgoing data) ---- */
3804
3805/* HCI Connection scheduler */
6039aa73
GP
3806static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3807 int *quote)
1da177e4
LT
3808{
3809 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3810 struct hci_conn *conn = NULL, *c;
abc5de8f 3811 unsigned int num = 0, min = ~0;
1da177e4 3812
8e87d142 3813 /* We don't have to lock device here. Connections are always
1da177e4 3814 * added and removed with TX task disabled. */
bf4c6325
GP
3815
3816 rcu_read_lock();
3817
3818 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3819 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3820 continue;
769be974
MH
3821
3822 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3823 continue;
3824
1da177e4
LT
3825 num++;
3826
3827 if (c->sent < min) {
3828 min = c->sent;
3829 conn = c;
3830 }
52087a79
LAD
3831
3832 if (hci_conn_num(hdev, type) == num)
3833 break;
1da177e4
LT
3834 }
3835
bf4c6325
GP
3836 rcu_read_unlock();
3837
1da177e4 3838 if (conn) {
6ed58ec5
VT
3839 int cnt, q;
3840
3841 switch (conn->type) {
3842 case ACL_LINK:
3843 cnt = hdev->acl_cnt;
3844 break;
3845 case SCO_LINK:
3846 case ESCO_LINK:
3847 cnt = hdev->sco_cnt;
3848 break;
3849 case LE_LINK:
3850 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3851 break;
3852 default:
3853 cnt = 0;
3854 BT_ERR("Unknown link type");
3855 }
3856
3857 q = cnt / num;
1da177e4
LT
3858 *quote = q ? q : 1;
3859 } else
3860 *quote = 0;
3861
3862 BT_DBG("conn %p quote %d", conn, *quote);
3863 return conn;
3864}
3865
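/* Worked example of the quota above: with 8 free controller slots
 * (cnt == 8) shared by three connections of the requested type
 * that have queued data (num == 3), q = 8 / 3 = 2, so the least
 * recently served connection (smallest c->sent) may transmit up to
 * two packets this round; a zero quotient is rounded up to one so
 * no connection is starved outright.
 */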
6039aa73 3866static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3867{
3868 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3869 struct hci_conn *c;
1da177e4 3870
bae1f5d9 3871 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3872
bf4c6325
GP
3873 rcu_read_lock();
3874
1da177e4 3875 /* Kill stalled connections */
bf4c6325 3876 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3877 if (c->type == type && c->sent) {
6ed93dc6
AE
3878 BT_ERR("%s killing stalled connection %pMR",
3879 hdev->name, &c->dst);
bed71748 3880 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3881 }
3882 }
bf4c6325
GP
3883
3884 rcu_read_unlock();
1da177e4
LT
3885}
3886
6039aa73
GP
3887static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3888 int *quote)
1da177e4 3889{
73d80deb
LAD
3890 struct hci_conn_hash *h = &hdev->conn_hash;
3891 struct hci_chan *chan = NULL;
abc5de8f 3892 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3893 struct hci_conn *conn;
73d80deb
LAD
3894 int cnt, q, conn_num = 0;
3895
3896 BT_DBG("%s", hdev->name);
3897
bf4c6325
GP
3898 rcu_read_lock();
3899
3900 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3901 struct hci_chan *tmp;
3902
3903 if (conn->type != type)
3904 continue;
3905
3906 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3907 continue;
3908
3909 conn_num++;
3910
8192edef 3911 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3912 struct sk_buff *skb;
3913
3914 if (skb_queue_empty(&tmp->data_q))
3915 continue;
3916
3917 skb = skb_peek(&tmp->data_q);
3918 if (skb->priority < cur_prio)
3919 continue;
3920
3921 if (skb->priority > cur_prio) {
3922 num = 0;
3923 min = ~0;
3924 cur_prio = skb->priority;
3925 }
3926
3927 num++;
3928
3929 if (conn->sent < min) {
3930 min = conn->sent;
3931 chan = tmp;
3932 }
3933 }
3934
3935 if (hci_conn_num(hdev, type) == conn_num)
3936 break;
3937 }
3938
bf4c6325
GP
3939 rcu_read_unlock();
3940
73d80deb
LAD
3941 if (!chan)
3942 return NULL;
3943
3944 switch (chan->conn->type) {
3945 case ACL_LINK:
3946 cnt = hdev->acl_cnt;
3947 break;
bd1eb66b
AE
3948 case AMP_LINK:
3949 cnt = hdev->block_cnt;
3950 break;
73d80deb
LAD
3951 case SCO_LINK:
3952 case ESCO_LINK:
3953 cnt = hdev->sco_cnt;
3954 break;
3955 case LE_LINK:
3956 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3957 break;
3958 default:
3959 cnt = 0;
3960 BT_ERR("Unknown link type");
3961 }
3962
3963 q = cnt / num;
3964 *quote = q ? q : 1;
3965 BT_DBG("chan %p quote %d", chan, *quote);
3966 return chan;
3967}
3968
02b20f0b
LAD
3969static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3970{
3971 struct hci_conn_hash *h = &hdev->conn_hash;
3972 struct hci_conn *conn;
3973 int num = 0;
3974
3975 BT_DBG("%s", hdev->name);
3976
bf4c6325
GP
3977 rcu_read_lock();
3978
3979 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3980 struct hci_chan *chan;
3981
3982 if (conn->type != type)
3983 continue;
3984
3985 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3986 continue;
3987
3988 num++;
3989
8192edef 3990 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3991 struct sk_buff *skb;
3992
3993 if (chan->sent) {
3994 chan->sent = 0;
3995 continue;
3996 }
3997
3998 if (skb_queue_empty(&chan->data_q))
3999 continue;
4000
4001 skb = skb_peek(&chan->data_q);
4002 if (skb->priority >= HCI_PRIO_MAX - 1)
4003 continue;
4004
4005 skb->priority = HCI_PRIO_MAX - 1;
4006
4007 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4008 skb->priority);
02b20f0b
LAD
4009 }
4010
4011 if (hci_conn_num(hdev, type) == num)
4012 break;
4013 }
bf4c6325
GP
4014
4015 rcu_read_unlock();
4016
02b20f0b
LAD
4017}
4018
b71d385a
AE
4019static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4020{
4021 /* Calculate count of blocks used by this packet */
4022 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4023}
4024
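/* Worked example for block-based flow control: assuming the
 * controller reports a data block length of 339 bytes in
 * hdev->block_len (the value is illustrative), a 1024 byte ACL
 * frame carries 1020 bytes after the 4 byte header, so
 * __get_blocks() returns DIV_ROUND_UP(1020, 339) == 4.
 */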
6039aa73 4025static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4026{
d7a5a11d 4027 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
4028 /* ACL tx timeout must be longer than maximum
4029 * link supervision timeout (40.9 seconds) */
63d2bc1b 4030 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4031 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4032 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4033 }
63d2bc1b 4034}
1da177e4 4035
6039aa73 4036static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4037{
4038 unsigned int cnt = hdev->acl_cnt;
4039 struct hci_chan *chan;
4040 struct sk_buff *skb;
4041 int quote;
4042
4043 __check_timeout(hdev, cnt);
04837f64 4044
73d80deb 4045 while (hdev->acl_cnt &&
a8c5fb1a 4046 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4047 u32 priority = (skb_peek(&chan->data_q))->priority;
4048 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4049 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4050 skb->len, skb->priority);
73d80deb 4051
ec1cce24
LAD
4052 /* Stop if priority has changed */
4053 if (skb->priority < priority)
4054 break;
4055
4056 skb = skb_dequeue(&chan->data_q);
4057
73d80deb 4058 hci_conn_enter_active_mode(chan->conn,
04124681 4059 bt_cb(skb)->force_active);
04837f64 4060
57d17d70 4061 hci_send_frame(hdev, skb);
1da177e4
LT
4062 hdev->acl_last_tx = jiffies;
4063
4064 hdev->acl_cnt--;
73d80deb
LAD
4065 chan->sent++;
4066 chan->conn->sent++;
1da177e4
LT
4067 }
4068 }
02b20f0b
LAD
4069
4070 if (cnt != hdev->acl_cnt)
4071 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4072}
4073
6039aa73 4074static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4075{
63d2bc1b 4076 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4077 struct hci_chan *chan;
4078 struct sk_buff *skb;
4079 int quote;
bd1eb66b 4080 u8 type;
b71d385a 4081
63d2bc1b 4082 __check_timeout(hdev, cnt);
b71d385a 4083
bd1eb66b
AE
4084 BT_DBG("%s", hdev->name);
4085
4086 if (hdev->dev_type == HCI_AMP)
4087 type = AMP_LINK;
4088 else
4089 type = ACL_LINK;
4090
b71d385a 4091 while (hdev->block_cnt > 0 &&
bd1eb66b 4092 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4093 u32 priority = (skb_peek(&chan->data_q))->priority;
4094 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4095 int blocks;
4096
4097 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4098 skb->len, skb->priority);
b71d385a
AE
4099
4100 /* Stop if priority has changed */
4101 if (skb->priority < priority)
4102 break;
4103
4104 skb = skb_dequeue(&chan->data_q);
4105
4106 blocks = __get_blocks(hdev, skb);
4107 if (blocks > hdev->block_cnt)
4108 return;
4109
4110 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4111 bt_cb(skb)->force_active);
b71d385a 4112
57d17d70 4113 hci_send_frame(hdev, skb);
b71d385a
AE
4114 hdev->acl_last_tx = jiffies;
4115
4116 hdev->block_cnt -= blocks;
4117 quote -= blocks;
4118
4119 chan->sent += blocks;
4120 chan->conn->sent += blocks;
4121 }
4122 }
4123
4124 if (cnt != hdev->block_cnt)
bd1eb66b 4125 hci_prio_recalculate(hdev, type);
b71d385a
AE
4126}
4127
6039aa73 4128static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4129{
4130 BT_DBG("%s", hdev->name);
4131
bd1eb66b
AE
4132 /* No ACL link over BR/EDR controller */
4133 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4134 return;
4135
4136 /* No AMP link over AMP controller */
4137 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4138 return;
4139
4140 switch (hdev->flow_ctl_mode) {
4141 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4142 hci_sched_acl_pkt(hdev);
4143 break;
4144
4145 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4146 hci_sched_acl_blk(hdev);
4147 break;
4148 }
4149}
4150
1da177e4 4151/* Schedule SCO */
6039aa73 4152static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4153{
4154 struct hci_conn *conn;
4155 struct sk_buff *skb;
4156 int quote;
4157
4158 BT_DBG("%s", hdev->name);
4159
52087a79
LAD
4160 if (!hci_conn_num(hdev, SCO_LINK))
4161 return;
4162
1da177e4
LT
4163 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4164 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4165 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4166 hci_send_frame(hdev, skb);
1da177e4
LT
4167
4168 conn->sent++;
4169 if (conn->sent == ~0)
4170 conn->sent = 0;
4171 }
4172 }
4173}
4174
6039aa73 4175static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4176{
4177 struct hci_conn *conn;
4178 struct sk_buff *skb;
4179 int quote;
4180
4181 BT_DBG("%s", hdev->name);
4182
52087a79
LAD
4183 if (!hci_conn_num(hdev, ESCO_LINK))
4184 return;
4185
8fc9ced3
GP
4186 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4187 &quote))) {
b6a0dc82
MH
4188 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4189 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4190 hci_send_frame(hdev, skb);
b6a0dc82
MH
4191
4192 conn->sent++;
4193 if (conn->sent == ~0)
4194 conn->sent = 0;
4195 }
4196 }
4197}
4198
6039aa73 4199static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4200{
73d80deb 4201 struct hci_chan *chan;
6ed58ec5 4202 struct sk_buff *skb;
02b20f0b 4203 int quote, cnt, tmp;
6ed58ec5
VT
4204
4205 BT_DBG("%s", hdev->name);
4206
52087a79
LAD
4207 if (!hci_conn_num(hdev, LE_LINK))
4208 return;
4209
d7a5a11d 4210 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5
VT
4211 /* LE tx timeout must be longer than maximum
4212 * link supervision timeout (40.9 seconds) */
bae1f5d9 4213 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4214 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4215 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4216 }
4217
4218 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4219 tmp = cnt;
73d80deb 4220 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4221 u32 priority = (skb_peek(&chan->data_q))->priority;
4222 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4223 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4224 skb->len, skb->priority);
6ed58ec5 4225
ec1cce24
LAD
4226 /* Stop if priority has changed */
4227 if (skb->priority < priority)
4228 break;
4229
4230 skb = skb_dequeue(&chan->data_q);
4231
57d17d70 4232 hci_send_frame(hdev, skb);
6ed58ec5
VT
4233 hdev->le_last_tx = jiffies;
4234
4235 cnt--;
73d80deb
LAD
4236 chan->sent++;
4237 chan->conn->sent++;
6ed58ec5
VT
4238 }
4239 }
73d80deb 4240
6ed58ec5
VT
4241 if (hdev->le_pkts)
4242 hdev->le_cnt = cnt;
4243 else
4244 hdev->acl_cnt = cnt;
02b20f0b
LAD
4245
4246 if (cnt != tmp)
4247 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4248}
4249
3eff45ea 4250static void hci_tx_work(struct work_struct *work)
1da177e4 4251{
3eff45ea 4252 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4253 struct sk_buff *skb;
4254
6ed58ec5 4255 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4256 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4257
d7a5a11d 4258 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
4259 /* Schedule queues and send stuff to HCI driver */
4260 hci_sched_acl(hdev);
4261 hci_sched_sco(hdev);
4262 hci_sched_esco(hdev);
4263 hci_sched_le(hdev);
4264 }
6ed58ec5 4265
1da177e4
LT
4266 /* Send next queued raw (unknown type) packet */
4267 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4268 hci_send_frame(hdev, skb);
1da177e4
LT
4269}
4270
25985edc 4271/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4272
4273/* ACL data packet */
6039aa73 4274static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4275{
4276 struct hci_acl_hdr *hdr = (void *) skb->data;
4277 struct hci_conn *conn;
4278 __u16 handle, flags;
4279
4280 skb_pull(skb, HCI_ACL_HDR_SIZE);
4281
4282 handle = __le16_to_cpu(hdr->handle);
4283 flags = hci_flags(handle);
4284 handle = hci_handle(handle);
4285
f0e09510 4286 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4287 handle, flags);
1da177e4
LT
4288
4289 hdev->stat.acl_rx++;
4290
4291 hci_dev_lock(hdev);
4292 conn = hci_conn_hash_lookup_handle(hdev, handle);
4293 hci_dev_unlock(hdev);
8e87d142 4294
1da177e4 4295 if (conn) {
65983fc7 4296 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4297
1da177e4 4298 /* Send to upper protocol */
686ebf28
UF
4299 l2cap_recv_acldata(conn, skb, flags);
4300 return;
1da177e4 4301 } else {
8e87d142 4302 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4303 hdev->name, handle);
1da177e4
LT
4304 }
4305
4306 kfree_skb(skb);
4307}
4308
4309/* SCO data packet */
6039aa73 4310static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4311{
4312 struct hci_sco_hdr *hdr = (void *) skb->data;
4313 struct hci_conn *conn;
4314 __u16 handle;
4315
4316 skb_pull(skb, HCI_SCO_HDR_SIZE);
4317
4318 handle = __le16_to_cpu(hdr->handle);
4319
f0e09510 4320 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4321
4322 hdev->stat.sco_rx++;
4323
4324 hci_dev_lock(hdev);
4325 conn = hci_conn_hash_lookup_handle(hdev, handle);
4326 hci_dev_unlock(hdev);
4327
4328 if (conn) {
1da177e4 4329 /* Send to upper protocol */
686ebf28
UF
4330 sco_recv_scodata(conn, skb);
4331 return;
1da177e4 4332 } else {
8e87d142 4333 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4334 hdev->name, handle);
1da177e4
LT
4335 }
4336
4337 kfree_skb(skb);
4338}
4339
9238f36a
JH
4340static bool hci_req_is_complete(struct hci_dev *hdev)
4341{
4342 struct sk_buff *skb;
4343
4344 skb = skb_peek(&hdev->cmd_q);
4345 if (!skb)
4346 return true;
4347
db6e3e8d 4348 return bt_cb(skb)->req.start;
9238f36a
JH
4349}
4350
42c6b129
JH
4351static void hci_resend_last(struct hci_dev *hdev)
4352{
4353 struct hci_command_hdr *sent;
4354 struct sk_buff *skb;
4355 u16 opcode;
4356
4357 if (!hdev->sent_cmd)
4358 return;
4359
4360 sent = (void *) hdev->sent_cmd->data;
4361 opcode = __le16_to_cpu(sent->opcode);
4362 if (opcode == HCI_OP_RESET)
4363 return;
4364
4365 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4366 if (!skb)
4367 return;
4368
4369 skb_queue_head(&hdev->cmd_q, skb);
4370 queue_work(hdev->workqueue, &hdev->cmd_work);
4371}
4372
e6214487
JH
4373void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4374 hci_req_complete_t *req_complete,
4375 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4376{
9238f36a
JH
4377 struct sk_buff *skb;
4378 unsigned long flags;
4379
4380 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4381
42c6b129
JH
4382 /* If the completed command doesn't match the last one that was
4383 * sent we need to do special handling of it.
9238f36a 4384 */
42c6b129
JH
4385 if (!hci_sent_cmd_data(hdev, opcode)) {
4386 /* Some CSR based controllers generate a spontaneous
4387 * reset complete event during init and any pending
4388 * command will never be completed. In such a case we
4389 * need to resend whatever was the last sent
4390 * command.
4391 */
4392 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4393 hci_resend_last(hdev);
4394
9238f36a 4395 return;
42c6b129 4396 }
9238f36a
JH
4397
4398 /* If the command succeeded and there's still more commands in
4399 * this request the request is not yet complete.
4400 */
4401 if (!status && !hci_req_is_complete(hdev))
4402 return;
4403
4404 /* If this was the last command in a request the complete
4405 * callback would be found in hdev->sent_cmd instead of the
4406 * command queue (hdev->cmd_q).
4407 */
e6214487
JH
4408 if (bt_cb(hdev->sent_cmd)->req.complete) {
4409 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4410 return;
4411 }
53e21fbc 4412
e6214487
JH
4413 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4414 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4415 return;
9238f36a
JH
4416 }
4417
4418 /* Remove all pending commands belonging to this request */
4419 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4420 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
db6e3e8d 4421 if (bt_cb(skb)->req.start) {
9238f36a
JH
4422 __skb_queue_head(&hdev->cmd_q, skb);
4423 break;
4424 }
4425
e6214487
JH
4426 *req_complete = bt_cb(skb)->req.complete;
4427 *req_complete_skb = bt_cb(skb)->req.complete_skb;
9238f36a
JH
4428 kfree_skb(skb);
4429 }
4430 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a
JH
4431}
4432
b78752cc 4433static void hci_rx_work(struct work_struct *work)
1da177e4 4434{
b78752cc 4435 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4436 struct sk_buff *skb;
4437
4438 BT_DBG("%s", hdev->name);
4439
1da177e4 4440 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4441 /* Send copy to monitor */
4442 hci_send_to_monitor(hdev, skb);
4443
1da177e4
LT
4444 if (atomic_read(&hdev->promisc)) {
4445 /* Send copy to the sockets */
470fe1b5 4446 hci_send_to_sock(hdev, skb);
1da177e4
LT
4447 }
4448
d7a5a11d 4449 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4450 kfree_skb(skb);
4451 continue;
4452 }
4453
4454 if (test_bit(HCI_INIT, &hdev->flags)) {
4455 /* Don't process data packets in this state. */
0d48d939 4456 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4457 case HCI_ACLDATA_PKT:
4458 case HCI_SCODATA_PKT:
4459 kfree_skb(skb);
4460 continue;
3ff50b79 4461 }
1da177e4
LT
4462 }
4463
4464 /* Process frame */
0d48d939 4465 switch (bt_cb(skb)->pkt_type) {
1da177e4 4466 case HCI_EVENT_PKT:
b78752cc 4467 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4468 hci_event_packet(hdev, skb);
4469 break;
4470
4471 case HCI_ACLDATA_PKT:
4472 BT_DBG("%s ACL data packet", hdev->name);
4473 hci_acldata_packet(hdev, skb);
4474 break;
4475
4476 case HCI_SCODATA_PKT:
4477 BT_DBG("%s SCO data packet", hdev->name);
4478 hci_scodata_packet(hdev, skb);
4479 break;
4480
4481 default:
4482 kfree_skb(skb);
4483 break;
4484 }
4485 }
1da177e4
LT
4486}
4487
c347b765 4488static void hci_cmd_work(struct work_struct *work)
1da177e4 4489{
c347b765 4490 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4491 struct sk_buff *skb;
4492
2104786b
AE
4493 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4494 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4495
1da177e4 4496 /* Send queued commands */
5a08ecce
AE
4497 if (atomic_read(&hdev->cmd_cnt)) {
4498 skb = skb_dequeue(&hdev->cmd_q);
4499 if (!skb)
4500 return;
4501
7585b97a 4502 kfree_skb(hdev->sent_cmd);
1da177e4 4503
a675d7f1 4504 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4505 if (hdev->sent_cmd) {
1da177e4 4506 atomic_dec(&hdev->cmd_cnt);
57d17d70 4507 hci_send_frame(hdev, skb);
7bdb8a5c 4508 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4509 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4510 else
65cc2b49
MH
4511 schedule_delayed_work(&hdev->cmd_timer,
4512 HCI_CMD_TIMEOUT);
1da177e4
LT
4513 } else {
4514 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4515 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4516 }
4517 }
4518}