/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
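
/* Usage sketch (illustrative addition, not from the original file): a
 * vendor setup callback could issue a synchronous command and parse the
 * Command Complete parameters along these lines, assuming the caller
 * already serializes against other requests:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... skb->data now points at struct hci_rp_read_local_version ...
 *	kfree_skb(skb);
 */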

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
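
/* Usage sketch (illustrative): callers hand hci_req_sync() one of the
 * small request-builder functions defined below; for example, the
 * HCISETSCAN ioctl handler later in this file does
 *
 *	hci_req_sync(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
 *
 * which queues a Write Scan Enable command and sleeps until
 * hci_req_sync_complete() reports the result or the timeout expires.
 */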

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

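/* Note on the return values below (Write Inquiry Mode parameter):
 * 0x00 = standard inquiry result format, 0x01 = inquiry result format
 * with RSSI, 0x02 = inquiry result with RSSI or extended inquiry
 * result format. The manufacturer/revision checks appear to cover
 * controllers that support RSSI results without advertising it in
 * their feature bits.
 */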
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
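
/* Userspace sketch (illustrative assumption, not part of this file):
 * the handler above is reached through the HCIINQUIRY ioctl on a raw
 * HCI socket, roughly:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;		// inquiry length, units of 1.28s
 *	buf.ir.num_rsp = 8;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	memcpy(buf.ir.lap, (__u8 []){ 0x33, 0x8b, 0x9e }, 3); // GIAC
 *
 *	ioctl(sock, HCIINQUIRY, &buf);
 */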

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	hci_req_run(&req, NULL);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

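/* Note on the key type values used below: 0x00-0x02 are the pre-Simple
 * Pairing combination, local unit and remote unit keys ("legacy"),
 * while HCI_LK_DEBUG_COMBINATION is 0x03 and HCI_LK_CHANGED_COMBINATION
 * is 0x06, per the link key type definitions in hci.h.
 */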
745c0ce3 1784static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1785 u8 key_type, u8 old_key_type)
d25e28ab
JH
1786{
1787 /* Legacy key */
1788 if (key_type < 0x03)
745c0ce3 1789 return true;
d25e28ab
JH
1790
1791 /* Debug keys are insecure so don't store them persistently */
1792 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1793 return false;
d25e28ab
JH
1794
1795 /* Changed combination key and there's no previous one */
1796 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1797 return false;
d25e28ab
JH
1798
1799 /* Security mode 3 case */
1800 if (!conn)
745c0ce3 1801 return true;
d25e28ab
JH
1802
1803 /* Neither local nor remote side had no-bonding as requirement */
1804 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1805 return true;
d25e28ab
JH
1806
1807 /* Local side had dedicated bonding as requirement */
1808 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1809 return true;
d25e28ab
JH
1810
1811 /* Remote side had dedicated bonding as requirement */
1812 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1813 return true;
d25e28ab
JH
1814
1815 /* If none of the above criteria match, then don't store the key
1816 * persistently */
745c0ce3 1817 return false;
d25e28ab
JH
1818}
1819
c9839a11 1820struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1821{
c9839a11 1822 struct smp_ltk *k;
75d262c2 1823
c9839a11
VCG
1824 list_for_each_entry(k, &hdev->long_term_keys, list) {
1825 if (k->ediv != ediv ||
a8c5fb1a 1826 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1827 continue;
1828
c9839a11 1829 return k;
75d262c2
VCG
1830 }
1831
1832 return NULL;
1833}
75d262c2 1834
c9839a11 1835struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1836 u8 addr_type)
75d262c2 1837{
c9839a11 1838 struct smp_ltk *k;
75d262c2 1839
c9839a11
VCG
1840 list_for_each_entry(k, &hdev->long_term_keys, list)
1841 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1842 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1843 return k;
1844
1845 return NULL;
1846}
75d262c2 1847
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
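
/* Illustrative sketch, not part of the original file: how a caller such as
 * the Link Key Notification event handler in hci_event.c feeds a key from
 * the controller into hci_add_link_key(). The example_ name and exact event
 * field usage are assumptions for illustration.
 */
#if 0
static void example_link_key_notify(struct hci_dev *hdev,
				    struct hci_ev_link_key_notify *ev)
{
	struct hci_conn *conn;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* new_key == 1 so that mgmt is notified and persistence decided */
	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
			 ev->key_type, conn ? conn->pin_length : 0);

	hci_dev_unlock(hdev);
}
#endif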

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

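/* The timeout handler above pairs with hci_cmd_work() at the bottom of this
 * file: cmd_timer is armed for HCI_CMD_TIMEOUT whenever a command is sent,
 * and forcing cmd_cnt back to 1 here lets the command queue make progress
 * again if the controller never delivers a Command Complete.
 */
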
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
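
/* Illustrative sketch, not part of the original file: a minimal caller of
 * the blacklist helpers, modelled on the mgmt Block Device path. Rejecting
 * BDADDR_ANY and duplicates happens inside hci_blacklist_add() itself, so a
 * caller only needs to hold the device lock. The example_ name is
 * hypothetical.
 */
#if 0
static int example_block_device(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 addr_type)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, addr_type);
	hci_dev_unlock(hdev);

	return err;
}
#endif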

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
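
/* The functions above form an asynchronous chain: le_scan_disable_work()
 * queues the LE Set Scan Enable command, its completion
 * (le_scan_disable_work_complete) either stops discovery or, for
 * interleaved discovery, fires a classic inquiry whose own completion
 * callback (inquiry_complete) handles failure. Nothing here blocks waiting
 * for the controller.
 */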

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
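
/* Illustrative sketch, not part of the original file: the registration
 * sequence a transport driver typically follows. The example_ transport
 * type and callbacks are hypothetical; only ->open and ->close are
 * mandatory, as checked at the top of hci_register_dev().
 */
#if 0
static int example_probe(struct example_transport *trans)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;		/* or HCI_UART, HCI_SDIO, ... */
	hdev->open = example_open;	/* bring the transport up */
	hdev->close = example_close;
	hdev->send = example_send;	/* consumes or frees each skb */

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
#endif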

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
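
/* Illustrative sketch, not part of the original file: a driver that already
 * knows the packet type (e.g. separate USB endpoints) pushes raw bytes
 * through hci_recv_fragment(); partial headers and bodies are buffered in
 * hdev->reassembly[] until a full frame can be handed to hci_recv_frame().
 * For H:4-style transports where the type byte is inline in the stream,
 * hci_recv_stream_fragment() below extracts it automatically. The example_
 * name is hypothetical.
 */
#if 0
static void example_rx_bytes(struct hci_dev *hdev, int pkt_type,
			     void *buf, int len)
{
	/* pkt_type is HCI_EVENT_PKT, HCI_ACLDATA_PKT or HCI_SCODATA_PKT */
	if (hci_recv_fragment(hdev, pkt_type, buf, len) < 0)
		BT_ERR("%s corrupted packet", hdev->name);
}
#endif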

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
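
/* Illustrative sketch, not part of the original file: building and running
 * a one-command request with the API above. Allocation failures inside
 * hci_req_add() are recorded in req->err and reported by hci_req_run(),
 * which also attaches the complete callback to the request's final command.
 * The example_ names are hypothetical.
 */
#if 0
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, example_complete);
}
#endif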

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
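
/* Worked example for the quota computed above: with hdev->acl_cnt == 9 free
 * controller buffers and num == 4 ready ACL connections, q = 9 / 4 = 2, so
 * the least-used connection may send two packets this round. When cnt / num
 * rounds down to zero the quota is still 1, so a connection can never be
 * starved outright.
 */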

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
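
/* hci_sched_acl() above dispatches on the controller's flow control mode:
 * BR/EDR controllers account for outstanding data in packets
 * (HCI_FLOW_CTL_MODE_PACKET_BASED), while AMP controllers account in
 * fixed-size blocks (HCI_FLOW_CTL_MODE_BLOCK_BASED), which is why
 * hci_sched_acl_blk() converts each skb to a block count via
 * __get_blocks().
 */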

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
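
/* Note on the write-back above: controllers that report no dedicated LE
 * buffers (le_pkts == 0) share the ACL buffer pool for LE traffic, so
 * hci_sched_le() must charge its sends against acl_cnt instead of le_cnt.
 */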

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}