/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

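/* Synchronous single-command helpers.  __hci_cmd_sync_ev() queues one
 * command with hci_req_add_ev(), sleeps on req_wait_q until
 * hci_req_sync_complete() runs (or the timeout expires), then fetches
 * the resulting event skb via hci_get_cmd_complete().  Passing a
 * non-zero event waits for that specific event instead of Command
 * Complete.
 */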
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

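/* Request builders used with the sync helpers above.  Each builder
 * only queues commands on the request; nothing reaches the controller
 * until the request is run through __hci_req_sync().
 */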
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

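/* Run the staged controller initialization.  Stage 1 resets the
 * controller (unless the HCI_QUIRK_RESET_ON_CLOSE quirk applies) and
 * reads the basic capabilities; stages 2-4 progressively configure
 * event masks and optional features based on what the earlier stages
 * discovered.
 */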
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

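/* Discovery state is tracked in hdev->discovery and mirrored to
 * userspace through mgmt_discovering().  Only the FINDING and
 * RESOLVING states count as an active discovery procedure.
 */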
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

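/* Add or refresh an inquiry cache entry.  Returns false when the
 * remote name is still unknown (the entry stays on the unknown list
 * for later name resolution), true otherwise.
 */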
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

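/* HCIINQUIRY ioctl handler: runs an inquiry if the cache is stale or
 * a flush was requested, waits for the procedure to finish, and then
 * copies the cached results back to userspace.
 */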
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

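/* Build the LE advertising data payload (flags, TX power and the
 * shortened or complete local name) into ptr and return the number
 * of bytes used.
 */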
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

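/* Power-on sequence: runs the driver's open() callback, the optional
 * driver setup() stage and then the staged HCI init (unless the
 * device is a raw or user-channel device).  On any failure the work
 * queues are flushed and the driver is closed again.
 */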
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

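/* Power-off sequence: flushes all pending work, optionally resets the
 * controller (HCI_QUIRK_RESET_ON_CLOSE) and drops every queued frame
 * before calling the driver's close() callback.
 */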
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

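/* Store (or update) a BR/EDR link key.  hci_persistent_key() decides
 * whether userspace should keep the key across power cycles; for a
 * non-persistent key the connection is marked so the key gets flushed
 * on disconnect.
 */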
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

c9839a11 1948int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1949 int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
04124681 1950 __le16 ediv, u8 rand[8])
75d262c2 1951{
c9839a11 1952 struct smp_ltk *key, *old_key;
75d262c2 1953
c9839a11
VCG
1954 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1955 return 0;
75d262c2 1956
c9839a11
VCG
1957 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1958 if (old_key)
75d262c2 1959 key = old_key;
c9839a11
VCG
1960 else {
1961 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1962 if (!key)
1963 return -ENOMEM;
c9839a11 1964 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1965 }
1966
75d262c2 1967 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1968 key->bdaddr_type = addr_type;
1969 memcpy(key->val, tk, sizeof(key->val));
1970 key->authenticated = authenticated;
1971 key->ediv = ediv;
1972 key->enc_size = enc_size;
1973 key->type = type;
1974 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1975
c9839a11
VCG
1976 if (!new_key)
1977 return 0;
75d262c2 1978
261cc5aa
VCG
1979 if (type & HCI_SMP_LTK)
1980 mgmt_new_ltk(hdev, key, 1);
1981
75d262c2
VCG
1982 return 0;
1983}
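/* A minimal, hypothetical call site for the function above (the real
 * caller lives in smp.c); all key material below is a placeholder:
 *
 *	u8 ltk[16] = { 0 }, rand[8] = { 0 };
 *
 *	hci_add_ltk(hdev, &bdaddr, ADDR_LE_DEV_RANDOM, HCI_SMP_LTK,
 *		    1, 0, ltk, sizeof(ltk), cpu_to_le16(0x0000), rand);
 */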
1984
55ed8ca1
JH
1985int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1986{
1987 struct link_key *key;
1988
1989 key = hci_find_link_key(hdev, bdaddr);
1990 if (!key)
1991 return -ENOENT;
1992
6ed93dc6 1993 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1994
1995 list_del(&key->list);
1996 kfree(key);
1997
1998 return 0;
1999}
2000
b899efaf
VCG
2001int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2002{
2003 struct smp_ltk *k, *tmp;
2004
2005 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2006 if (bacmp(bdaddr, &k->bdaddr))
2007 continue;
2008
6ed93dc6 2009 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2010
2011 list_del(&k->list);
2012 kfree(k);
2013 }
2014
2015 return 0;
2016}
2017
6bd32326 2018/* HCI command timer function */
bda4f23a 2019static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2020{
2021 struct hci_dev *hdev = (void *) arg;
2022
bda4f23a
AE
2023 if (hdev->sent_cmd) {
2024 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2025 u16 opcode = __le16_to_cpu(sent->opcode);
2026
2027 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2028 } else {
2029 BT_ERR("%s command tx timeout", hdev->name);
2030 }
2031
6bd32326 2032 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2033 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2034}
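/* This timer is (re)armed by hci_cmd_work() further down every time a
 * command is handed to the driver, via
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 * so it only fires when the controller stops answering.
 */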
2035
2763eda6 2036struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2037 bdaddr_t *bdaddr)
2763eda6
SJ
2038{
2039 struct oob_data *data;
2040
2041 list_for_each_entry(data, &hdev->remote_oob_data, list)
2042 if (bacmp(bdaddr, &data->bdaddr) == 0)
2043 return data;
2044
2045 return NULL;
2046}
2047
2048int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2049{
2050 struct oob_data *data;
2051
2052 data = hci_find_remote_oob_data(hdev, bdaddr);
2053 if (!data)
2054 return -ENOENT;
2055
6ed93dc6 2056 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2057
2058 list_del(&data->list);
2059 kfree(data);
2060
2061 return 0;
2062}
2063
2064int hci_remote_oob_data_clear(struct hci_dev *hdev)
2065{
2066 struct oob_data *data, *n;
2067
2068 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2069 list_del(&data->list);
2070 kfree(data);
2071 }
2072
2073 return 0;
2074}
2075
2076int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 2077 u8 *randomizer)
2763eda6
SJ
2078{
2079 struct oob_data *data;
2080
2081 data = hci_find_remote_oob_data(hdev, bdaddr);
2082
2083 if (!data) {
2084 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2085 if (!data)
2086 return -ENOMEM;
2087
2088 bacpy(&data->bdaddr, bdaddr);
2089 list_add(&data->list, &hdev->remote_oob_data);
2090 }
2091
2092 memcpy(data->hash, hash, sizeof(data->hash));
2093 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2094
6ed93dc6 2095 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2096
2097 return 0;
2098}
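/* Sketch of a caller (values are placeholders; in this version of the
 * code both the hash C and the randomizer R are 16 bytes):
 *
 *	u8 hash[16], randomizer[16];
 *
 *	hci_add_remote_oob_data(hdev, &bdaddr, hash, randomizer);
 */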
2099
04124681 2100struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 2101{
8035ded4 2102 struct bdaddr_list *b;
b2a66aad 2103
8035ded4 2104 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
2105 if (bacmp(bdaddr, &b->bdaddr) == 0)
2106 return b;
b2a66aad
AJ
2107
2108 return NULL;
2109}
2110
2111int hci_blacklist_clear(struct hci_dev *hdev)
2112{
2113 struct list_head *p, *n;
2114
2115 list_for_each_safe(p, n, &hdev->blacklist) {
2116 struct bdaddr_list *b;
2117
2118 b = list_entry(p, struct bdaddr_list, list);
2119
2120 list_del(p);
2121 kfree(b);
2122 }
2123
2124 return 0;
2125}
2126
88c1fe4b 2127int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2128{
2129 struct bdaddr_list *entry;
b2a66aad
AJ
2130
2131 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2132 return -EBADF;
2133
5e762444
AJ
2134 if (hci_blacklist_lookup(hdev, bdaddr))
2135 return -EEXIST;
b2a66aad
AJ
2136
2137 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2138 if (!entry)
2139 return -ENOMEM;
b2a66aad
AJ
2140
2141 bacpy(&entry->bdaddr, bdaddr);
2142
2143 list_add(&entry->list, &hdev->blacklist);
2144
88c1fe4b 2145 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2146}
2147
88c1fe4b 2148int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2149{
2150 struct bdaddr_list *entry;
b2a66aad 2151
1ec918ce 2152 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 2153 return hci_blacklist_clear(hdev);
b2a66aad
AJ
2154
2155 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 2156 if (!entry)
5e762444 2157 return -ENOENT;
b2a66aad
AJ
2158
2159 list_del(&entry->list);
2160 kfree(entry);
2161
88c1fe4b 2162 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2163}
2164
4c87eaab 2165static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2166{
4c87eaab
AG
2167 if (status) {
2168 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2169
4c87eaab
AG
2170 hci_dev_lock(hdev);
2171 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2172 hci_dev_unlock(hdev);
2173 return;
2174 }
7ba8b4be
AG
2175}
2176
4c87eaab 2177static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2178{
4c87eaab
AG
2179 /* General inquiry access code (GIAC) */
2180 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2181 struct hci_request req;
2182 struct hci_cp_inquiry cp;
7ba8b4be
AG
2183 int err;
2184
4c87eaab
AG
2185 if (status) {
2186 BT_ERR("Failed to disable LE scanning: status %d", status);
2187 return;
2188 }
7ba8b4be 2189
4c87eaab
AG
2190 switch (hdev->discovery.type) {
2191 case DISCOV_TYPE_LE:
2192 hci_dev_lock(hdev);
2193 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2194 hci_dev_unlock(hdev);
2195 break;
7ba8b4be 2196
4c87eaab
AG
2197 case DISCOV_TYPE_INTERLEAVED:
2198 hci_req_init(&req, hdev);
7ba8b4be 2199
4c87eaab
AG
2200 memset(&cp, 0, sizeof(cp));
2201 memcpy(&cp.lap, lap, sizeof(cp.lap));
2202 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2203 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 2204
4c87eaab 2205 hci_dev_lock(hdev);
7dbfac1d 2206
4c87eaab 2207 hci_inquiry_cache_flush(hdev);
7dbfac1d 2208
4c87eaab
AG
2209 err = hci_req_run(&req, inquiry_complete);
2210 if (err) {
2211 BT_ERR("Inquiry request failed: err %d", err);
2212 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2213 }
7dbfac1d 2214
4c87eaab
AG
2215 hci_dev_unlock(hdev);
2216 break;
7dbfac1d 2217 }
7dbfac1d
AG
2218}
2219
7ba8b4be
AG
2220static void le_scan_disable_work(struct work_struct *work)
2221{
2222 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2223 le_scan_disable.work);
7ba8b4be 2224 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
2225 struct hci_request req;
2226 int err;
7ba8b4be
AG
2227
2228 BT_DBG("%s", hdev->name);
2229
4c87eaab 2230 hci_req_init(&req, hdev);
28b75a89 2231
7ba8b4be 2232 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
2233 cp.enable = LE_SCAN_DISABLE;
2234 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 2235
4c87eaab
AG
2236 err = hci_req_run(&req, le_scan_disable_work_complete);
2237 if (err)
2238 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2239}
2240
9be0dab7
DH
2241/* Alloc HCI device */
2242struct hci_dev *hci_alloc_dev(void)
2243{
2244 struct hci_dev *hdev;
2245
2246 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2247 if (!hdev)
2248 return NULL;
2249
b1b813d4
DH
2250 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2251 hdev->esco_type = (ESCO_HV1);
2252 hdev->link_mode = (HCI_LM_ACCEPT);
2253 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
2254 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2255 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2256
b1b813d4
DH
2257 hdev->sniff_max_interval = 800;
2258 hdev->sniff_min_interval = 80;
2259
2260 mutex_init(&hdev->lock);
2261 mutex_init(&hdev->req_lock);
2262
2263 INIT_LIST_HEAD(&hdev->mgmt_pending);
2264 INIT_LIST_HEAD(&hdev->blacklist);
2265 INIT_LIST_HEAD(&hdev->uuids);
2266 INIT_LIST_HEAD(&hdev->link_keys);
2267 INIT_LIST_HEAD(&hdev->long_term_keys);
2268 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 2269 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
2270
2271 INIT_WORK(&hdev->rx_work, hci_rx_work);
2272 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2273 INIT_WORK(&hdev->tx_work, hci_tx_work);
2274 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 2275
b1b813d4
DH
2276 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2277 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2278 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2279
b1b813d4
DH
2280 skb_queue_head_init(&hdev->rx_q);
2281 skb_queue_head_init(&hdev->cmd_q);
2282 skb_queue_head_init(&hdev->raw_q);
2283
2284 init_waitqueue_head(&hdev->req_wait_q);
2285
bda4f23a 2286 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2287
b1b813d4
DH
2288 hci_init_sysfs(hdev);
2289 discovery_init(hdev);
9be0dab7
DH
2290
2291 return hdev;
2292}
2293EXPORT_SYMBOL(hci_alloc_dev);
2294
2295/* Free HCI device */
2296void hci_free_dev(struct hci_dev *hdev)
2297{
9be0dab7
DH
2298 /* will be freed via device release */
2299 put_device(&hdev->dev);
2300}
2301EXPORT_SYMBOL(hci_free_dev);
2302
1da177e4
LT
2303/* Register HCI device */
2304int hci_register_dev(struct hci_dev *hdev)
2305{
b1b813d4 2306 int id, error;
1da177e4 2307
010666a1 2308 if (!hdev->open || !hdev->close)
1da177e4
LT
2309 return -EINVAL;
2310
08add513
MM
2311 /* Do not allow HCI_AMP devices to register at index 0,
2312 * so the index can be used as the AMP controller ID.
2313 */
3df92b31
SL
2314 switch (hdev->dev_type) {
2315 case HCI_BREDR:
2316 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2317 break;
2318 case HCI_AMP:
2319 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2320 break;
2321 default:
2322 return -EINVAL;
1da177e4 2323 }
8e87d142 2324
3df92b31
SL
2325 if (id < 0)
2326 return id;
2327
1da177e4
LT
2328 sprintf(hdev->name, "hci%d", id);
2329 hdev->id = id;
2d8b3a11
AE
2330
2331 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2332
d8537548
KC
2333 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2334 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
2335 if (!hdev->workqueue) {
2336 error = -ENOMEM;
2337 goto err;
2338 }
f48fd9c8 2339
d8537548
KC
2340 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2341 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
2342 if (!hdev->req_workqueue) {
2343 destroy_workqueue(hdev->workqueue);
2344 error = -ENOMEM;
2345 goto err;
2346 }
2347
33ca954d
DH
2348 error = hci_add_sysfs(hdev);
2349 if (error < 0)
2350 goto err_wqueue;
1da177e4 2351
611b30f7 2352 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2353 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2354 hdev);
611b30f7
MH
2355 if (hdev->rfkill) {
2356 if (rfkill_register(hdev->rfkill) < 0) {
2357 rfkill_destroy(hdev->rfkill);
2358 hdev->rfkill = NULL;
2359 }
2360 }
2361
5e130367
JH
2362 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2363 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2364
a8b2d5c2 2365 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 2366 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 2367
01cd3404 2368 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
2369 /* Assume BR/EDR support until proven otherwise (such as
2370 * through reading supported features during init).
2371 */
2372 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2373 }
ce2be9ac 2374
fcee3377
GP
2375 write_lock(&hci_dev_list_lock);
2376 list_add(&hdev->list, &hci_dev_list);
2377 write_unlock(&hci_dev_list_lock);
2378
1da177e4 2379 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2380 hci_dev_hold(hdev);
1da177e4 2381
19202573 2382 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2383
1da177e4 2384 return id;
f48fd9c8 2385
33ca954d
DH
2386err_wqueue:
2387 destroy_workqueue(hdev->workqueue);
6ead1bbc 2388 destroy_workqueue(hdev->req_workqueue);
33ca954d 2389err:
3df92b31 2390 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 2391
33ca954d 2392 return error;
1da177e4
LT
2393}
2394EXPORT_SYMBOL(hci_register_dev);
2395
2396/* Unregister HCI device */
59735631 2397void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2398{
3df92b31 2399 int i, id;
ef222013 2400
c13854ce 2401 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2402
94324962
JH
2403 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2404
3df92b31
SL
2405 id = hdev->id;
2406
f20d09d5 2407 write_lock(&hci_dev_list_lock);
1da177e4 2408 list_del(&hdev->list);
f20d09d5 2409 write_unlock(&hci_dev_list_lock);
1da177e4
LT
2410
2411 hci_dev_do_close(hdev);
2412
cd4c5391 2413 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
2414 kfree_skb(hdev->reassembly[i]);
2415
b9b5ef18
GP
2416 cancel_work_sync(&hdev->power_on);
2417
ab81cbf9 2418 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 2419 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 2420 hci_dev_lock(hdev);
744cf19e 2421 mgmt_index_removed(hdev);
09fd0de5 2422 hci_dev_unlock(hdev);
56e5cb86 2423 }
ab81cbf9 2424
2e58ef3e
JH
2425 /* mgmt_index_removed should take care of emptying the
2426 * pending list */
2427 BUG_ON(!list_empty(&hdev->mgmt_pending));
2428
1da177e4
LT
2429 hci_notify(hdev, HCI_DEV_UNREG);
2430
611b30f7
MH
2431 if (hdev->rfkill) {
2432 rfkill_unregister(hdev->rfkill);
2433 rfkill_destroy(hdev->rfkill);
2434 }
2435
ce242970 2436 hci_del_sysfs(hdev);
147e2d59 2437
f48fd9c8 2438 destroy_workqueue(hdev->workqueue);
6ead1bbc 2439 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2440
09fd0de5 2441 hci_dev_lock(hdev);
e2e0cacb 2442 hci_blacklist_clear(hdev);
2aeb9a1a 2443 hci_uuids_clear(hdev);
55ed8ca1 2444 hci_link_keys_clear(hdev);
b899efaf 2445 hci_smp_ltks_clear(hdev);
2763eda6 2446 hci_remote_oob_data_clear(hdev);
09fd0de5 2447 hci_dev_unlock(hdev);
e2e0cacb 2448
dc946bd8 2449 hci_dev_put(hdev);
3df92b31
SL
2450
2451 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
2452}
2453EXPORT_SYMBOL(hci_unregister_dev);
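/* Editor's sketch of the lifecycle a transport driver follows with the
 * functions above; the callback names are hypothetical, but open and
 * close are mandatory (hci_register_dev rejects a device without them):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */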
2454
2455/* Suspend HCI device */
2456int hci_suspend_dev(struct hci_dev *hdev)
2457{
2458 hci_notify(hdev, HCI_DEV_SUSPEND);
2459 return 0;
2460}
2461EXPORT_SYMBOL(hci_suspend_dev);
2462
2463/* Resume HCI device */
2464int hci_resume_dev(struct hci_dev *hdev)
2465{
2466 hci_notify(hdev, HCI_DEV_RESUME);
2467 return 0;
2468}
2469EXPORT_SYMBOL(hci_resume_dev);
2470
76bca880
MH
2471/* Receive frame from HCI drivers */
2472int hci_recv_frame(struct sk_buff *skb)
2473{
2474 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2475 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2476 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2477 kfree_skb(skb);
2478 return -ENXIO;
2479 }
2480
d82603c6 2481 /* Incoming skb */
76bca880
MH
2482 bt_cb(skb)->incoming = 1;
2483
2484 /* Time stamp */
2485 __net_timestamp(skb);
2486
76bca880 2487 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2488 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2489
76bca880
MH
2490 return 0;
2491}
2492EXPORT_SYMBOL(hci_recv_frame);
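/* Sketch: how a driver hands a completed frame to the core in this
 * version of the API, where the owning hdev travels in skb->dev and the
 * packet type must already be set in the skb control buffer:
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 */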
2493
33e882a5 2494static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2495 int count, __u8 index)
33e882a5
SS
2496{
2497 int len = 0;
2498 int hlen = 0;
2499 int remain = count;
2500 struct sk_buff *skb;
2501 struct bt_skb_cb *scb;
2502
2503 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2504 index >= NUM_REASSEMBLY)
33e882a5
SS
2505 return -EILSEQ;
2506
2507 skb = hdev->reassembly[index];
2508
2509 if (!skb) {
2510 switch (type) {
2511 case HCI_ACLDATA_PKT:
2512 len = HCI_MAX_FRAME_SIZE;
2513 hlen = HCI_ACL_HDR_SIZE;
2514 break;
2515 case HCI_EVENT_PKT:
2516 len = HCI_MAX_EVENT_SIZE;
2517 hlen = HCI_EVENT_HDR_SIZE;
2518 break;
2519 case HCI_SCODATA_PKT:
2520 len = HCI_MAX_SCO_SIZE;
2521 hlen = HCI_SCO_HDR_SIZE;
2522 break;
2523 }
2524
1e429f38 2525 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2526 if (!skb)
2527 return -ENOMEM;
2528
2529 scb = (void *) skb->cb;
2530 scb->expect = hlen;
2531 scb->pkt_type = type;
2532
2533 skb->dev = (void *) hdev;
2534 hdev->reassembly[index] = skb;
2535 }
2536
2537 while (count) {
2538 scb = (void *) skb->cb;
89bb46d0 2539 len = min_t(uint, scb->expect, count);
33e882a5
SS
2540
2541 memcpy(skb_put(skb, len), data, len);
2542
2543 count -= len;
2544 data += len;
2545 scb->expect -= len;
2546 remain = count;
2547
2548 switch (type) {
2549 case HCI_EVENT_PKT:
2550 if (skb->len == HCI_EVENT_HDR_SIZE) {
2551 struct hci_event_hdr *h = hci_event_hdr(skb);
2552 scb->expect = h->plen;
2553
2554 if (skb_tailroom(skb) < scb->expect) {
2555 kfree_skb(skb);
2556 hdev->reassembly[index] = NULL;
2557 return -ENOMEM;
2558 }
2559 }
2560 break;
2561
2562 case HCI_ACLDATA_PKT:
2563 if (skb->len == HCI_ACL_HDR_SIZE) {
2564 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2565 scb->expect = __le16_to_cpu(h->dlen);
2566
2567 if (skb_tailroom(skb) < scb->expect) {
2568 kfree_skb(skb);
2569 hdev->reassembly[index] = NULL;
2570 return -ENOMEM;
2571 }
2572 }
2573 break;
2574
2575 case HCI_SCODATA_PKT:
2576 if (skb->len == HCI_SCO_HDR_SIZE) {
2577 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2578 scb->expect = h->dlen;
2579
2580 if (skb_tailroom(skb) < scb->expect) {
2581 kfree_skb(skb);
2582 hdev->reassembly[index] = NULL;
2583 return -ENOMEM;
2584 }
2585 }
2586 break;
2587 }
2588
2589 if (scb->expect == 0) {
2590 /* Complete frame */
2591
2592 bt_cb(skb)->pkt_type = type;
2593 hci_recv_frame(skb);
2594
2595 hdev->reassembly[index] = NULL;
2596 return remain;
2597 }
2598 }
2599
2600 return remain;
2601}
2602
ef222013
MH
2603int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2604{
f39a3c06
SS
2605 int rem = 0;
2606
ef222013
MH
2607 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2608 return -EILSEQ;
2609
da5f6c37 2610 while (count) {
1e429f38 2611 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2612 if (rem < 0)
2613 return rem;
ef222013 2614
f39a3c06
SS
2615 data += (count - rem);
2616 count = rem;
f81c6224 2617 }
ef222013 2618
f39a3c06 2619 return rem;
ef222013
MH
2620}
2621EXPORT_SYMBOL(hci_recv_fragment);
2622
99811510
SS
2623#define STREAM_REASSEMBLY 0
2624
2625int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2626{
2627 int type;
2628 int rem = 0;
2629
da5f6c37 2630 while (count) {
99811510
SS
2631 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2632
2633 if (!skb) {
2634 struct { char type; } *pkt;
2635
2636 /* Start of the frame */
2637 pkt = data;
2638 type = pkt->type;
2639
2640 data++;
2641 count--;
2642 } else
2643 type = bt_cb(skb)->pkt_type;
2644
1e429f38 2645 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2646 STREAM_REASSEMBLY);
99811510
SS
2647 if (rem < 0)
2648 return rem;
2649
2650 data += (count - rem);
2651 count = rem;
f81c6224 2652 }
99811510
SS
2653
2654 return rem;
2655}
2656EXPORT_SYMBOL(hci_recv_stream_fragment);
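/* Sketch: an H:4 UART driver can feed raw bytes straight from its
 * receive path, since the packet type indicators are part of the stream
 * itself (the function name here is hypothetical):
 *
 *	static void my_uart_rx(struct hci_dev *hdev, void *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, buf, len) < 0)
 *			BT_ERR("Frame reassembly failed");
 *	}
 */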
2657
1da177e4
LT
2658/* ---- Interface to upper protocols ---- */
2659
1da177e4
LT
2660int hci_register_cb(struct hci_cb *cb)
2661{
2662 BT_DBG("%p name %s", cb, cb->name);
2663
f20d09d5 2664 write_lock(&hci_cb_list_lock);
1da177e4 2665 list_add(&cb->list, &hci_cb_list);
f20d09d5 2666 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2667
2668 return 0;
2669}
2670EXPORT_SYMBOL(hci_register_cb);
2671
2672int hci_unregister_cb(struct hci_cb *cb)
2673{
2674 BT_DBG("%p name %s", cb, cb->name);
2675
f20d09d5 2676 write_lock(&hci_cb_list_lock);
1da177e4 2677 list_del(&cb->list);
f20d09d5 2678 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2679
2680 return 0;
2681}
2682EXPORT_SYMBOL(hci_unregister_cb);
2683
2684static int hci_send_frame(struct sk_buff *skb)
2685{
2686 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2687
2688 if (!hdev) {
2689 kfree_skb(skb);
2690 return -ENODEV;
2691 }
2692
0d48d939 2693 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2694
cd82e61c
MH
2695 /* Time stamp */
2696 __net_timestamp(skb);
1da177e4 2697
cd82e61c
MH
2698 /* Send copy to monitor */
2699 hci_send_to_monitor(hdev, skb);
2700
2701 if (atomic_read(&hdev->promisc)) {
2702 /* Send copy to the sockets */
470fe1b5 2703 hci_send_to_sock(hdev, skb);
1da177e4
LT
2704 }
2705
2706 /* Get rid of skb owner, prior to sending to the driver. */
2707 skb_orphan(skb);
2708
2709 return hdev->send(skb);
2710}
2711
3119ae95
JH
2712void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2713{
2714 skb_queue_head_init(&req->cmd_q);
2715 req->hdev = hdev;
5d73e034 2716 req->err = 0;
3119ae95
JH
2717}
2718
2719int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2720{
2721 struct hci_dev *hdev = req->hdev;
2722 struct sk_buff *skb;
2723 unsigned long flags;
2724
2725 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2726
5d73e034
AG
2727 /* If an error occurred during request building, remove all HCI
2728 * commands queued on the HCI request queue.
2729 */
2730 if (req->err) {
2731 skb_queue_purge(&req->cmd_q);
2732 return req->err;
2733 }
2734
3119ae95
JH
2735 /* Do not allow empty requests */
2736 if (skb_queue_empty(&req->cmd_q))
382b0c39 2737 return -ENODATA;
3119ae95
JH
2738
2739 skb = skb_peek_tail(&req->cmd_q);
2740 bt_cb(skb)->req.complete = complete;
2741
2742 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2743 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2744 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2745
2746 queue_work(hdev->workqueue, &hdev->cmd_work);
2747
2748 return 0;
2749}
2750
1ca3a9d0 2751static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 2752 u32 plen, const void *param)
1da177e4
LT
2753{
2754 int len = HCI_COMMAND_HDR_SIZE + plen;
2755 struct hci_command_hdr *hdr;
2756 struct sk_buff *skb;
2757
1da177e4 2758 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
2759 if (!skb)
2760 return NULL;
1da177e4
LT
2761
2762 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2763 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2764 hdr->plen = plen;
2765
2766 if (plen)
2767 memcpy(skb_put(skb, plen), param, plen);
2768
2769 BT_DBG("skb len %d", skb->len);
2770
0d48d939 2771 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2772 skb->dev = (void *) hdev;
c78ae283 2773
1ca3a9d0
JH
2774 return skb;
2775}
2776
2777/* Send HCI command */
07dc93dd
JH
2778int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2779 const void *param)
1ca3a9d0
JH
2780{
2781 struct sk_buff *skb;
2782
2783 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2784
2785 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2786 if (!skb) {
2787 BT_ERR("%s no memory for command", hdev->name);
2788 return -ENOMEM;
2789 }
2790
11714b3d
JH
2791 /* Stand-alone HCI commands must be flagged as
2792 * single-command requests.
2793 */
2794 bt_cb(skb)->req.start = true;
2795
1da177e4 2796 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2797 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2798
2799 return 0;
2800}
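/* For example, a bare HCI_Reset carries no parameters:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */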
1da177e4 2801
71c76a17 2802/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
2803void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2804 const void *param, u8 event)
71c76a17
JH
2805{
2806 struct hci_dev *hdev = req->hdev;
2807 struct sk_buff *skb;
2808
2809 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2810
34739c1e
AG
2811 /* If an error occurred during request building, there is no point in
2812 * queueing the HCI command. We can simply return.
2813 */
2814 if (req->err)
2815 return;
2816
71c76a17
JH
2817 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2818 if (!skb) {
5d73e034
AG
2819 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2820 hdev->name, opcode);
2821 req->err = -ENOMEM;
e348fe6b 2822 return;
71c76a17
JH
2823 }
2824
2825 if (skb_queue_empty(&req->cmd_q))
2826 bt_cb(skb)->req.start = true;
2827
02350a72
JH
2828 bt_cb(skb)->req.event = event;
2829
71c76a17 2830 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2831}
2832
07dc93dd
JH
2833void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2834 const void *param)
02350a72
JH
2835{
2836 hci_req_add_ev(req, opcode, plen, param, 0);
2837}
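/* Putting the request API together, a minimal editor's sketch (the
 * completion callback name is hypothetical; LE_SCAN_ENABLE is assumed
 * from hci.h):
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_enable cp;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_ENABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 */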
2838
1da177e4 2839/* Get data from the previously sent command */
a9de9248 2840void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2841{
2842 struct hci_command_hdr *hdr;
2843
2844 if (!hdev->sent_cmd)
2845 return NULL;
2846
2847 hdr = (void *) hdev->sent_cmd->data;
2848
a9de9248 2849 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2850 return NULL;
2851
f0e09510 2852 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2853
2854 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2855}
2856
2857/* Send ACL data */
2858static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2859{
2860 struct hci_acl_hdr *hdr;
2861 int len = skb->len;
2862
badff6d0
ACM
2863 skb_push(skb, HCI_ACL_HDR_SIZE);
2864 skb_reset_transport_header(skb);
9c70220b 2865 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2866 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2867 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2868}
2869
ee22be7e 2870static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2871 struct sk_buff *skb, __u16 flags)
1da177e4 2872{
ee22be7e 2873 struct hci_conn *conn = chan->conn;
1da177e4
LT
2874 struct hci_dev *hdev = conn->hdev;
2875 struct sk_buff *list;
2876
087bfd99
GP
2877 skb->len = skb_headlen(skb);
2878 skb->data_len = 0;
2879
2880 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2881
2882 switch (hdev->dev_type) {
2883 case HCI_BREDR:
2884 hci_add_acl_hdr(skb, conn->handle, flags);
2885 break;
2886 case HCI_AMP:
2887 hci_add_acl_hdr(skb, chan->handle, flags);
2888 break;
2889 default:
2890 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2891 return;
2892 }
087bfd99 2893
70f23020
AE
2894 list = skb_shinfo(skb)->frag_list;
2895 if (!list) {
1da177e4
LT
2896 /* Non-fragmented */
2897 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2898
73d80deb 2899 skb_queue_tail(queue, skb);
1da177e4
LT
2900 } else {
2901 /* Fragmented */
2902 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2903
2904 skb_shinfo(skb)->frag_list = NULL;
2905
2906 /* Queue all fragments atomically */
af3e6359 2907 spin_lock(&queue->lock);
1da177e4 2908
73d80deb 2909 __skb_queue_tail(queue, skb);
e702112f
AE
2910
2911 flags &= ~ACL_START;
2912 flags |= ACL_CONT;
1da177e4
LT
2913 do {
2914 skb = list; list = list->next;
8e87d142 2915
1da177e4 2916 skb->dev = (void *) hdev;
0d48d939 2917 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2918 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2919
2920 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2921
73d80deb 2922 __skb_queue_tail(queue, skb);
1da177e4
LT
2923 } while (list);
2924
af3e6359 2925 spin_unlock(&queue->lock);
1da177e4 2926 }
73d80deb
LAD
2927}
2928
2929void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2930{
ee22be7e 2931 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 2932
f0e09510 2933 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb
LAD
2934
2935 skb->dev = (void *) hdev;
73d80deb 2936
ee22be7e 2937 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 2938
3eff45ea 2939 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2940}
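/* Sketch: L2CAP hands a fully built PDU to the scheduler with an ACL
 * packet boundary flag, e.g.
 *
 *	hci_send_acl(chan, skb, ACL_START_NO_FLUSH);
 *
 * and hci_queue_acl() above splits any frag_list into ACL_CONT
 * continuation packets.
 */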
1da177e4
LT
2941
2942/* Send SCO data */
0d861d8b 2943void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2944{
2945 struct hci_dev *hdev = conn->hdev;
2946 struct hci_sco_hdr hdr;
2947
2948 BT_DBG("%s len %d", hdev->name, skb->len);
2949
aca3192c 2950 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2951 hdr.dlen = skb->len;
2952
badff6d0
ACM
2953 skb_push(skb, HCI_SCO_HDR_SIZE);
2954 skb_reset_transport_header(skb);
9c70220b 2955 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2956
2957 skb->dev = (void *) hdev;
0d48d939 2958 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2959
1da177e4 2960 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2961 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2962}
1da177e4
LT
2963
2964/* ---- HCI TX task (outgoing data) ---- */
2965
2966/* HCI Connection scheduler */
6039aa73
GP
2967static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2968 int *quote)
1da177e4
LT
2969{
2970 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2971 struct hci_conn *conn = NULL, *c;
abc5de8f 2972 unsigned int num = 0, min = ~0;
1da177e4 2973
8e87d142 2974 /* We don't have to lock the device here. Connections are always
1da177e4 2975 * added and removed with the TX task disabled. */
bf4c6325
GP
2976
2977 rcu_read_lock();
2978
2979 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2980 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2981 continue;
769be974
MH
2982
2983 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2984 continue;
2985
1da177e4
LT
2986 num++;
2987
2988 if (c->sent < min) {
2989 min = c->sent;
2990 conn = c;
2991 }
52087a79
LAD
2992
2993 if (hci_conn_num(hdev, type) == num)
2994 break;
1da177e4
LT
2995 }
2996
bf4c6325
GP
2997 rcu_read_unlock();
2998
1da177e4 2999 if (conn) {
6ed58ec5
VT
3000 int cnt, q;
3001
3002 switch (conn->type) {
3003 case ACL_LINK:
3004 cnt = hdev->acl_cnt;
3005 break;
3006 case SCO_LINK:
3007 case ESCO_LINK:
3008 cnt = hdev->sco_cnt;
3009 break;
3010 case LE_LINK:
3011 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3012 break;
3013 default:
3014 cnt = 0;
3015 BT_ERR("Unknown link type");
3016 }
3017
3018 q = cnt / num;
1da177e4
LT
3019 *quote = q ? q : 1;
3020 } else
3021 *quote = 0;
3022
3023 BT_DBG("conn %p quote %d", conn, *quote);
3024 return conn;
3025}
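/* Worked example (illustrative): with hdev->acl_cnt == 8 and three ACL
 * connections holding queued data, num == 3 and the least-used
 * connection is granted q = 8 / 3 = 2 packets this round; a zero
 * quotient is rounded up to 1 so a busy link can never starve entirely.
 */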
3026
6039aa73 3027static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3028{
3029 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3030 struct hci_conn *c;
1da177e4 3031
bae1f5d9 3032 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3033
bf4c6325
GP
3034 rcu_read_lock();
3035
1da177e4 3036 /* Kill stalled connections */
bf4c6325 3037 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3038 if (c->type == type && c->sent) {
6ed93dc6
AE
3039 BT_ERR("%s killing stalled connection %pMR",
3040 hdev->name, &c->dst);
bed71748 3041 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3042 }
3043 }
bf4c6325
GP
3044
3045 rcu_read_unlock();
1da177e4
LT
3046}
3047
6039aa73
GP
3048static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3049 int *quote)
1da177e4 3050{
73d80deb
LAD
3051 struct hci_conn_hash *h = &hdev->conn_hash;
3052 struct hci_chan *chan = NULL;
abc5de8f 3053 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3054 struct hci_conn *conn;
73d80deb
LAD
3055 int cnt, q, conn_num = 0;
3056
3057 BT_DBG("%s", hdev->name);
3058
bf4c6325
GP
3059 rcu_read_lock();
3060
3061 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3062 struct hci_chan *tmp;
3063
3064 if (conn->type != type)
3065 continue;
3066
3067 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3068 continue;
3069
3070 conn_num++;
3071
8192edef 3072 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3073 struct sk_buff *skb;
3074
3075 if (skb_queue_empty(&tmp->data_q))
3076 continue;
3077
3078 skb = skb_peek(&tmp->data_q);
3079 if (skb->priority < cur_prio)
3080 continue;
3081
3082 if (skb->priority > cur_prio) {
3083 num = 0;
3084 min = ~0;
3085 cur_prio = skb->priority;
3086 }
3087
3088 num++;
3089
3090 if (conn->sent < min) {
3091 min = conn->sent;
3092 chan = tmp;
3093 }
3094 }
3095
3096 if (hci_conn_num(hdev, type) == conn_num)
3097 break;
3098 }
3099
bf4c6325
GP
3100 rcu_read_unlock();
3101
73d80deb
LAD
3102 if (!chan)
3103 return NULL;
3104
3105 switch (chan->conn->type) {
3106 case ACL_LINK:
3107 cnt = hdev->acl_cnt;
3108 break;
bd1eb66b
AE
3109 case AMP_LINK:
3110 cnt = hdev->block_cnt;
3111 break;
73d80deb
LAD
3112 case SCO_LINK:
3113 case ESCO_LINK:
3114 cnt = hdev->sco_cnt;
3115 break;
3116 case LE_LINK:
3117 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3118 break;
3119 default:
3120 cnt = 0;
3121 BT_ERR("Unknown link type");
3122 }
3123
3124 q = cnt / num;
3125 *quote = q ? q : 1;
3126 BT_DBG("chan %p quote %d", chan, *quote);
3127 return chan;
3128}
3129
02b20f0b
LAD
3130static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3131{
3132 struct hci_conn_hash *h = &hdev->conn_hash;
3133 struct hci_conn *conn;
3134 int num = 0;
3135
3136 BT_DBG("%s", hdev->name);
3137
bf4c6325
GP
3138 rcu_read_lock();
3139
3140 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3141 struct hci_chan *chan;
3142
3143 if (conn->type != type)
3144 continue;
3145
3146 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3147 continue;
3148
3149 num++;
3150
8192edef 3151 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3152 struct sk_buff *skb;
3153
3154 if (chan->sent) {
3155 chan->sent = 0;
3156 continue;
3157 }
3158
3159 if (skb_queue_empty(&chan->data_q))
3160 continue;
3161
3162 skb = skb_peek(&chan->data_q);
3163 if (skb->priority >= HCI_PRIO_MAX - 1)
3164 continue;
3165
3166 skb->priority = HCI_PRIO_MAX - 1;
3167
3168 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3169 skb->priority);
02b20f0b
LAD
3170 }
3171
3172 if (hci_conn_num(hdev, type) == num)
3173 break;
3174 }
bf4c6325
GP
3175
3176 rcu_read_unlock();
3177
02b20f0b
LAD
3178}
3179
b71d385a
AE
3180static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3181{
3182 /* Calculate count of blocks used by this packet */
3183 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3184}
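/* Example: with hdev->block_len == 50, an ACL frame of skb->len == 254
 * occupies DIV_ROUND_UP(254 - 4, 50) == 5 data blocks, the 4 being
 * HCI_ACL_HDR_SIZE (2-byte handle/flags plus 2-byte length).
 */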
3185
6039aa73 3186static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3187{
1da177e4
LT
3188 if (!test_bit(HCI_RAW, &hdev->flags)) {
3189 /* ACL tx timeout must be longer than the maximum
3190 * link supervision timeout (40.9 seconds) */
63d2bc1b 3191 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3192 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3193 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3194 }
63d2bc1b 3195}
1da177e4 3196
6039aa73 3197static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3198{
3199 unsigned int cnt = hdev->acl_cnt;
3200 struct hci_chan *chan;
3201 struct sk_buff *skb;
3202 int quote;
3203
3204 __check_timeout(hdev, cnt);
04837f64 3205
73d80deb 3206 while (hdev->acl_cnt &&
a8c5fb1a 3207 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3208 u32 priority = (skb_peek(&chan->data_q))->priority;
3209 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3210 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3211 skb->len, skb->priority);
73d80deb 3212
ec1cce24
LAD
3213 /* Stop if priority has changed */
3214 if (skb->priority < priority)
3215 break;
3216
3217 skb = skb_dequeue(&chan->data_q);
3218
73d80deb 3219 hci_conn_enter_active_mode(chan->conn,
04124681 3220 bt_cb(skb)->force_active);
04837f64 3221
1da177e4
LT
3222 hci_send_frame(skb);
3223 hdev->acl_last_tx = jiffies;
3224
3225 hdev->acl_cnt--;
73d80deb
LAD
3226 chan->sent++;
3227 chan->conn->sent++;
1da177e4
LT
3228 }
3229 }
02b20f0b
LAD
3230
3231 if (cnt != hdev->acl_cnt)
3232 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3233}
3234
6039aa73 3235static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3236{
63d2bc1b 3237 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3238 struct hci_chan *chan;
3239 struct sk_buff *skb;
3240 int quote;
bd1eb66b 3241 u8 type;
b71d385a 3242
63d2bc1b 3243 __check_timeout(hdev, cnt);
b71d385a 3244
bd1eb66b
AE
3245 BT_DBG("%s", hdev->name);
3246
3247 if (hdev->dev_type == HCI_AMP)
3248 type = AMP_LINK;
3249 else
3250 type = ACL_LINK;
3251
b71d385a 3252 while (hdev->block_cnt > 0 &&
bd1eb66b 3253 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3254 u32 priority = (skb_peek(&chan->data_q))->priority;
3255 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3256 int blocks;
3257
3258 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3259 skb->len, skb->priority);
b71d385a
AE
3260
3261 /* Stop if priority has changed */
3262 if (skb->priority < priority)
3263 break;
3264
3265 skb = skb_dequeue(&chan->data_q);
3266
3267 blocks = __get_blocks(hdev, skb);
3268 if (blocks > hdev->block_cnt)
3269 return;
3270
3271 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3272 bt_cb(skb)->force_active);
b71d385a
AE
3273
3274 hci_send_frame(skb);
3275 hdev->acl_last_tx = jiffies;
3276
3277 hdev->block_cnt -= blocks;
3278 quote -= blocks;
3279
3280 chan->sent += blocks;
3281 chan->conn->sent += blocks;
3282 }
3283 }
3284
3285 if (cnt != hdev->block_cnt)
bd1eb66b 3286 hci_prio_recalculate(hdev, type);
b71d385a
AE
3287}
3288
6039aa73 3289static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3290{
3291 BT_DBG("%s", hdev->name);
3292
bd1eb66b
AE
3293 /* No ACL link over BR/EDR controller */
3294 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3295 return;
3296
3297 /* No AMP link over AMP controller */
3298 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3299 return;
3300
3301 switch (hdev->flow_ctl_mode) {
3302 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3303 hci_sched_acl_pkt(hdev);
3304 break;
3305
3306 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3307 hci_sched_acl_blk(hdev);
3308 break;
3309 }
3310}
3311
1da177e4 3312/* Schedule SCO */
6039aa73 3313static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3314{
3315 struct hci_conn *conn;
3316 struct sk_buff *skb;
3317 int quote;
3318
3319 BT_DBG("%s", hdev->name);
3320
52087a79
LAD
3321 if (!hci_conn_num(hdev, SCO_LINK))
3322 return;
3323
1da177e4
LT
3324 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3325 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3326 BT_DBG("skb %p len %d", skb, skb->len);
3327 hci_send_frame(skb);
3328
3329 conn->sent++;
3330 if (conn->sent == ~0)
3331 conn->sent = 0;
3332 }
3333 }
3334}
3335
6039aa73 3336static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3337{
3338 struct hci_conn *conn;
3339 struct sk_buff *skb;
3340 int quote;
3341
3342 BT_DBG("%s", hdev->name);
3343
52087a79
LAD
3344 if (!hci_conn_num(hdev, ESCO_LINK))
3345 return;
3346
8fc9ced3
GP
3347 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3348 &quote))) {
b6a0dc82
MH
3349 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3350 BT_DBG("skb %p len %d", skb, skb->len);
3351 hci_send_frame(skb);
3352
3353 conn->sent++;
3354 if (conn->sent == ~0)
3355 conn->sent = 0;
3356 }
3357 }
3358}
3359
6039aa73 3360static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3361{
73d80deb 3362 struct hci_chan *chan;
6ed58ec5 3363 struct sk_buff *skb;
02b20f0b 3364 int quote, cnt, tmp;
6ed58ec5
VT
3365
3366 BT_DBG("%s", hdev->name);
3367
52087a79
LAD
3368 if (!hci_conn_num(hdev, LE_LINK))
3369 return;
3370
6ed58ec5
VT
3371 if (!test_bit(HCI_RAW, &hdev->flags)) {
3372 /* LE tx timeout must be longer than the maximum
3373 * link supervision timeout (40.9 seconds) */
bae1f5d9 3374 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3375 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3376 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3377 }
3378
3379 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3380 tmp = cnt;
73d80deb 3381 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3382 u32 priority = (skb_peek(&chan->data_q))->priority;
3383 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3384 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3385 skb->len, skb->priority);
6ed58ec5 3386
ec1cce24
LAD
3387 /* Stop if priority has changed */
3388 if (skb->priority < priority)
3389 break;
3390
3391 skb = skb_dequeue(&chan->data_q);
3392
6ed58ec5
VT
3393 hci_send_frame(skb);
3394 hdev->le_last_tx = jiffies;
3395
3396 cnt--;
73d80deb
LAD
3397 chan->sent++;
3398 chan->conn->sent++;
6ed58ec5
VT
3399 }
3400 }
73d80deb 3401
6ed58ec5
VT
3402 if (hdev->le_pkts)
3403 hdev->le_cnt = cnt;
3404 else
3405 hdev->acl_cnt = cnt;
02b20f0b
LAD
3406
3407 if (cnt != tmp)
3408 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3409}
3410
3eff45ea 3411static void hci_tx_work(struct work_struct *work)
1da177e4 3412{
3eff45ea 3413 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3414 struct sk_buff *skb;
3415
6ed58ec5 3416 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3417 hdev->sco_cnt, hdev->le_cnt);
1da177e4 3418
52de599e
MH
3419 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3420 /* Schedule queues and send stuff to HCI driver */
3421 hci_sched_acl(hdev);
3422 hci_sched_sco(hdev);
3423 hci_sched_esco(hdev);
3424 hci_sched_le(hdev);
3425 }
6ed58ec5 3426
1da177e4
LT
3427 /* Send next queued raw (unknown type) packet */
3428 while ((skb = skb_dequeue(&hdev->raw_q)))
3429 hci_send_frame(skb);
1da177e4
LT
3430}
3431
25985edc 3432/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3433
3434/* ACL data packet */
6039aa73 3435static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3436{
3437 struct hci_acl_hdr *hdr = (void *) skb->data;
3438 struct hci_conn *conn;
3439 __u16 handle, flags;
3440
3441 skb_pull(skb, HCI_ACL_HDR_SIZE);
3442
3443 handle = __le16_to_cpu(hdr->handle);
3444 flags = hci_flags(handle);
3445 handle = hci_handle(handle);
3446
f0e09510 3447 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3448 handle, flags);
1da177e4
LT
3449
3450 hdev->stat.acl_rx++;
3451
3452 hci_dev_lock(hdev);
3453 conn = hci_conn_hash_lookup_handle(hdev, handle);
3454 hci_dev_unlock(hdev);
8e87d142 3455
1da177e4 3456 if (conn) {
65983fc7 3457 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3458
1da177e4 3459 /* Send to upper protocol */
686ebf28
UF
3460 l2cap_recv_acldata(conn, skb, flags);
3461 return;
1da177e4 3462 } else {
8e87d142 3463 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3464 hdev->name, handle);
1da177e4
LT
3465 }
3466
3467 kfree_skb(skb);
3468}
3469
3470/* SCO data packet */
6039aa73 3471static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3472{
3473 struct hci_sco_hdr *hdr = (void *) skb->data;
3474 struct hci_conn *conn;
3475 __u16 handle;
3476
3477 skb_pull(skb, HCI_SCO_HDR_SIZE);
3478
3479 handle = __le16_to_cpu(hdr->handle);
3480
f0e09510 3481 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3482
3483 hdev->stat.sco_rx++;
3484
3485 hci_dev_lock(hdev);
3486 conn = hci_conn_hash_lookup_handle(hdev, handle);
3487 hci_dev_unlock(hdev);
3488
3489 if (conn) {
1da177e4 3490 /* Send to upper protocol */
686ebf28
UF
3491 sco_recv_scodata(conn, skb);
3492 return;
1da177e4 3493 } else {
8e87d142 3494 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3495 hdev->name, handle);
1da177e4
LT
3496 }
3497
3498 kfree_skb(skb);
3499}
3500
9238f36a
JH
3501static bool hci_req_is_complete(struct hci_dev *hdev)
3502{
3503 struct sk_buff *skb;
3504
3505 skb = skb_peek(&hdev->cmd_q);
3506 if (!skb)
3507 return true;
3508
3509 return bt_cb(skb)->req.start;
3510}
3511
42c6b129
JH
3512static void hci_resend_last(struct hci_dev *hdev)
3513{
3514 struct hci_command_hdr *sent;
3515 struct sk_buff *skb;
3516 u16 opcode;
3517
3518 if (!hdev->sent_cmd)
3519 return;
3520
3521 sent = (void *) hdev->sent_cmd->data;
3522 opcode = __le16_to_cpu(sent->opcode);
3523 if (opcode == HCI_OP_RESET)
3524 return;
3525
3526 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3527 if (!skb)
3528 return;
3529
3530 skb_queue_head(&hdev->cmd_q, skb);
3531 queue_work(hdev->workqueue, &hdev->cmd_work);
3532}
3533
9238f36a
JH
3534void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3535{
3536 hci_req_complete_t req_complete = NULL;
3537 struct sk_buff *skb;
3538 unsigned long flags;
3539
3540 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3541
42c6b129
JH
3542 /* If the completed command doesn't match the last one that was
3543 * sent, we need to do special handling of it.
9238f36a 3544 */
42c6b129
JH
3545 if (!hci_sent_cmd_data(hdev, opcode)) {
3546 /* Some CSR based controllers generate a spontaneous
3547 * reset complete event during init and any pending
3548 * command will never be completed. In such a case we
3549 * need to resend whatever was the last sent
3550 * command.
3551 */
3552 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3553 hci_resend_last(hdev);
3554
9238f36a 3555 return;
42c6b129 3556 }
9238f36a
JH
3557
3558 /* If the command succeeded and there are still more commands in
3559 * this request, the request is not yet complete.
3560 */
3561 if (!status && !hci_req_is_complete(hdev))
3562 return;
3563
3564 /* If this was the last command in a request, the complete
3565 * callback would be found in hdev->sent_cmd instead of the
3566 * command queue (hdev->cmd_q).
3567 */
3568 if (hdev->sent_cmd) {
3569 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
3570
3571 if (req_complete) {
3572 /* We must set the complete callback to NULL to
3573 * avoid calling the callback more than once if
3574 * this function gets called again.
3575 */
3576 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3577
9238f36a 3578 goto call_complete;
53e21fbc 3579 }
9238f36a
JH
3580 }
3581
3582 /* Remove all pending commands belonging to this request */
3583 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3584 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3585 if (bt_cb(skb)->req.start) {
3586 __skb_queue_head(&hdev->cmd_q, skb);
3587 break;
3588 }
3589
3590 req_complete = bt_cb(skb)->req.complete;
3591 kfree_skb(skb);
3592 }
3593 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3594
3595call_complete:
3596 if (req_complete)
3597 req_complete(hdev, status);
3598}
3599
b78752cc 3600static void hci_rx_work(struct work_struct *work)
1da177e4 3601{
b78752cc 3602 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3603 struct sk_buff *skb;
3604
3605 BT_DBG("%s", hdev->name);
3606
1da177e4 3607 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3608 /* Send copy to monitor */
3609 hci_send_to_monitor(hdev, skb);
3610
1da177e4
LT
3611 if (atomic_read(&hdev->promisc)) {
3612 /* Send copy to the sockets */
470fe1b5 3613 hci_send_to_sock(hdev, skb);
1da177e4
LT
3614 }
3615
0736cfa8
MH
3616 if (test_bit(HCI_RAW, &hdev->flags) ||
3617 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
3618 kfree_skb(skb);
3619 continue;
3620 }
3621
3622 if (test_bit(HCI_INIT, &hdev->flags)) {
3623 /* Don't process data packets in these states. */
0d48d939 3624 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3625 case HCI_ACLDATA_PKT:
3626 case HCI_SCODATA_PKT:
3627 kfree_skb(skb);
3628 continue;
3ff50b79 3629 }
1da177e4
LT
3630 }
3631
3632 /* Process frame */
0d48d939 3633 switch (bt_cb(skb)->pkt_type) {
1da177e4 3634 case HCI_EVENT_PKT:
b78752cc 3635 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3636 hci_event_packet(hdev, skb);
3637 break;
3638
3639 case HCI_ACLDATA_PKT:
3640 BT_DBG("%s ACL data packet", hdev->name);
3641 hci_acldata_packet(hdev, skb);
3642 break;
3643
3644 case HCI_SCODATA_PKT:
3645 BT_DBG("%s SCO data packet", hdev->name);
3646 hci_scodata_packet(hdev, skb);
3647 break;
3648
3649 default:
3650 kfree_skb(skb);
3651 break;
3652 }
3653 }
1da177e4
LT
3654}
3655
c347b765 3656static void hci_cmd_work(struct work_struct *work)
1da177e4 3657{
c347b765 3658 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3659 struct sk_buff *skb;
3660
2104786b
AE
3661 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3662 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3663
1da177e4 3664 /* Send queued commands */
5a08ecce
AE
3665 if (atomic_read(&hdev->cmd_cnt)) {
3666 skb = skb_dequeue(&hdev->cmd_q);
3667 if (!skb)
3668 return;
3669
7585b97a 3670 kfree_skb(hdev->sent_cmd);
1da177e4 3671
a675d7f1 3672 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 3673 if (hdev->sent_cmd) {
1da177e4
LT
3674 atomic_dec(&hdev->cmd_cnt);
3675 hci_send_frame(skb);
7bdb8a5c
SJ
3676 if (test_bit(HCI_RESET, &hdev->flags))
3677 del_timer(&hdev->cmd_timer);
3678 else
3679 mod_timer(&hdev->cmd_timer,
5f246e89 3680 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3681 } else {
3682 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3683 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3684 }
3685 }
3686}
2519a1fc 3687
31f7956c
AG
3688u8 bdaddr_to_le(u8 bdaddr_type)
3689{
3690 switch (bdaddr_type) {
3691 case BDADDR_LE_PUBLIC:
3692 return ADDR_LE_DEV_PUBLIC;
3693
3694 default:
3695 /* Fallback to LE Random address type */
3696 return ADDR_LE_DEV_RANDOM;
3697 }
3698}
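/* For instance:
 *	bdaddr_to_le(BDADDR_LE_PUBLIC) == ADDR_LE_DEV_PUBLIC
 *	bdaddr_to_le(BDADDR_LE_RANDOM) == ADDR_LE_DEV_RANDOM
 * and any unrecognized value also falls back to ADDR_LE_DEV_RANDOM.
 */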