net/bluetooth/hci_sock.c
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
8e87d142
YH
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
7a6038b3 26#include <linux/compat.h>
8c520a59 27#include <linux/export.h>
787b306c 28#include <linux/utsname.h>
70ecce91 29#include <linux/sched.h>
1da177e4
LT
30#include <asm/unaligned.h>
31
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
cd82e61c 34#include <net/bluetooth/hci_mon.h>
fa4335d7
JH
35#include <net/bluetooth/mgmt.h>
36
37#include "mgmt_util.h"
1da177e4 38
801c1e8d
JH
39static LIST_HEAD(mgmt_chan_list);
40static DEFINE_MUTEX(mgmt_chan_list_lock);
41
70ecce91
MH
42static DEFINE_IDA(sock_cookie_ida);
43
cd82e61c
MH
44static atomic_t monitor_promisc = ATOMIC_INIT(0);
45
1da177e4
LT
46/* ----- HCI socket interface ----- */
47
863def58
MH
48/* Socket info */
49#define hci_pi(sk) ((struct hci_pinfo *) sk)
50
51struct hci_pinfo {
52 struct bt_sock bt;
53 struct hci_dev *hdev;
54 struct hci_filter filter;
55 __u32 cmsg_mask;
56 unsigned short channel;
6befc644 57 unsigned long flags;
70ecce91
MH
58 __u32 cookie;
59 char comm[TASK_COMM_LEN];
863def58
MH
60};
61
6befc644
MH
62void hci_sock_set_flag(struct sock *sk, int nr)
63{
64 set_bit(nr, &hci_pi(sk)->flags);
65}
66
67void hci_sock_clear_flag(struct sock *sk, int nr)
68{
69 clear_bit(nr, &hci_pi(sk)->flags);
70}
71
c85be545
MH
72int hci_sock_test_flag(struct sock *sk, int nr)
73{
74 return test_bit(nr, &hci_pi(sk)->flags);
75}
76
d0f172b1
JH
77unsigned short hci_sock_get_channel(struct sock *sk)
78{
79 return hci_pi(sk)->channel;
80}
81
70ecce91
MH
82u32 hci_sock_get_cookie(struct sock *sk)
83{
84 return hci_pi(sk)->cookie;
85}
86
df1cb87a
MH
87static bool hci_sock_gen_cookie(struct sock *sk)
88{
89 int id = hci_pi(sk)->cookie;
90
91 if (!id) {
92 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
93 if (id < 0)
94 id = 0xffffffff;
95
96 hci_pi(sk)->cookie = id;
97 get_task_comm(hci_pi(sk)->comm, current);
98 return true;
99 }
100
101 return false;
102}
103
104static void hci_sock_free_cookie(struct sock *sk)
105{
106 int id = hci_pi(sk)->cookie;
107
108 if (id) {
109 hci_pi(sk)->cookie = 0xffffffff;
110 ida_simple_remove(&sock_cookie_ida, id);
111 }
112}
113
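/* The cookie is assigned lazily, the first time an unbound raw socket
 * issues an ioctl or when the socket gets bound. Together with the
 * task name captured at the same time it identifies the socket in the
 * monitor traces (HCI_MON_CTRL_OPEN/CLOSE/COMMAND below), and it is
 * released again from hci_sock_release().
 */
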
9391976a 114static inline int hci_test_bit(int nr, const void *addr)
1da177e4 115{
9391976a 116 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
1da177e4
LT
117}
118
119/* Security filter */
3ad254f7
MH
120#define HCI_SFLT_MAX_OGF 5
121
122struct hci_sec_filter {
123 __u32 type_mask;
124 __u32 event_mask[2];
125 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
126};
127
7e67c112 128static const struct hci_sec_filter hci_sec_filter = {
1da177e4
LT
129 /* Packet types */
130 0x10,
131 /* Events */
dd7f5527 132 { 0x1000d9fe, 0x0000b00c },
1da177e4
LT
133 /* Commands */
134 {
135 { 0x0 },
136 /* OGF_LINK_CTL */
7c631a67 137 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
1da177e4 138 /* OGF_LINK_POLICY */
7c631a67 139 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
1da177e4 140 /* OGF_HOST_CTL */
7c631a67 141 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
1da177e4 142 /* OGF_INFO_PARAM */
7c631a67 143 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
1da177e4 144 /* OGF_STATUS_PARAM */
7c631a67 145 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
1da177e4
LT
146 }
147};
148
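/* Layout of the security filter above: type_mask is a bitmap of
 * allowed HCI packet types, event_mask[] covers event codes 0-63 and
 * each ocf_mask[ogf] row holds four 32-bit words covering OCF values
 * 0-127 for that OGF. Individual bits are looked up with
 * hci_test_bit() (word nr >> 5, bit nr & 31). Unprivileged raw
 * sockets are limited to these commands and events; CAP_NET_RAW
 * bypasses the restriction (see hci_sock_sendmsg() and
 * hci_sock_setsockopt()).
 */
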
149static struct bt_sock_list hci_sk_list = {
d5fb2962 150 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
1da177e4
LT
151};
152
f81fe64f
MH
153static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
154{
155 struct hci_filter *flt;
156 int flt_type, flt_event;
157
158 /* Apply filter */
159 flt = &hci_pi(sk)->filter;
160
d79f34e3 161 flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
f81fe64f
MH
162
163 if (!test_bit(flt_type, &flt->type_mask))
164 return true;
165
166 /* Extra filter for event packets only */
d79f34e3 167 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
f81fe64f
MH
168 return false;
169
170 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
171
172 if (!hci_test_bit(flt_event, &flt->event_mask))
173 return true;
174
175 /* Check filter only when opcode is set */
176 if (!flt->opcode)
177 return false;
178
179 if (flt_event == HCI_EV_CMD_COMPLETE &&
180 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
181 return true;
182
183 if (flt_event == HCI_EV_CMD_STATUS &&
184 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
185 return true;
186
187 return false;
188}
189
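/* A "true" return from is_filtered_packet() above means the frame is
 * dropped for that socket: its packet type bit is not set in the
 * socket filter, the event code bit is not set for an HCI event, or
 * an opcode filter is active and a Command Complete/Status event
 * carries a different opcode.
 */
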
1da177e4 190/* Send frame to RAW socket */
470fe1b5 191void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
192{
193 struct sock *sk;
e0edf373 194 struct sk_buff *skb_copy = NULL;
1da177e4
LT
195
196 BT_DBG("hdev %p len %d", hdev, skb->len);
197
198 read_lock(&hci_sk_list.lock);
470fe1b5 199
b67bfe0d 200 sk_for_each(sk, &hci_sk_list.head) {
1da177e4
LT
201 struct sk_buff *nskb;
202
203 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
204 continue;
205
206 /* Don't send frame to the socket it came from */
207 if (skb->sk == sk)
208 continue;
209
23500189 210 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
d79f34e3
MH
211 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
212 hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
213 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
cc974003
MH
214 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
215 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
bb77543e 216 continue;
23500189
MH
217 if (is_filtered_packet(sk, skb))
218 continue;
219 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
220 if (!bt_cb(skb)->incoming)
221 continue;
d79f34e3
MH
222 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
223 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
cc974003
MH
224 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
225 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
23500189
MH
226 continue;
227 } else {
228 /* Don't send frame to other channel types */
1da177e4 229 continue;
23500189 230 }
1da177e4 231
e0edf373
MH
232 if (!skb_copy) {
233 /* Create a private copy with headroom */
bad93e9d 234 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
e0edf373
MH
235 if (!skb_copy)
236 continue;
237
238 /* Put type byte before the data */
d79f34e3 239 memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
e0edf373
MH
240 }
241
242 nskb = skb_clone(skb_copy, GFP_ATOMIC);
70f23020 243 if (!nskb)
1da177e4
LT
244 continue;
245
470fe1b5
MH
246 if (sock_queue_rcv_skb(sk, nskb))
247 kfree_skb(nskb);
248 }
249
250 read_unlock(&hci_sk_list.lock);
e0edf373
MH
251
252 kfree_skb(skb_copy);
470fe1b5
MH
253}
254
7129069e 255/* Send frame to sockets with specific channel */
a9ee77af
SAS
256static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
257 int flag, struct sock *skip_sk)
470fe1b5
MH
258{
259 struct sock *sk;
470fe1b5 260
7129069e 261 BT_DBG("channel %u len %d", channel, skb->len);
470fe1b5 262
b67bfe0d 263 sk_for_each(sk, &hci_sk_list.head) {
470fe1b5
MH
264 struct sk_buff *nskb;
265
c08b1a1d 266 /* Ignore socket without the flag set */
c85be545 267 if (!hci_sock_test_flag(sk, flag))
d7f72f61
MH
268 continue;
269
c08b1a1d
MH
270 /* Skip the original socket */
271 if (sk == skip_sk)
17711c62
MH
272 continue;
273
274 if (sk->sk_state != BT_BOUND)
275 continue;
276
277 if (hci_pi(sk)->channel != channel)
278 continue;
279
280 nskb = skb_clone(skb, GFP_ATOMIC);
281 if (!nskb)
282 continue;
283
284 if (sock_queue_rcv_skb(sk, nskb))
285 kfree_skb(nskb);
286 }
287
a9ee77af
SAS
288}
289
290void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
291 int flag, struct sock *skip_sk)
292{
293 read_lock(&hci_sk_list.lock);
294 __hci_send_to_channel(channel, skb, flag, skip_sk);
17711c62
MH
295 read_unlock(&hci_sk_list.lock);
296}
297
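/* Three delivery paths exist for raw frames: hci_send_to_sock() above
 * fans traffic from one controller out to the raw and user channel
 * sockets bound to it, hci_send_to_channel() delivers to every socket
 * bound to a given channel that also has the requested flag set
 * (typically HCI_SOCK_TRUSTED for the monitor), and
 * hci_send_to_monitor() below wraps a frame in a hci_mon_hdr before
 * doing so.
 */
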
cd82e61c
MH
298/* Send frame to monitor socket */
299void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
300{
cd82e61c 301 struct sk_buff *skb_copy = NULL;
2b531294 302 struct hci_mon_hdr *hdr;
cd82e61c
MH
303 __le16 opcode;
304
305 if (!atomic_read(&monitor_promisc))
306 return;
307
308 BT_DBG("hdev %p len %d", hdev, skb->len);
309
d79f34e3 310 switch (hci_skb_pkt_type(skb)) {
cd82e61c 311 case HCI_COMMAND_PKT:
dcf4adbf 312 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
cd82e61c
MH
313 break;
314 case HCI_EVENT_PKT:
dcf4adbf 315 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
cd82e61c
MH
316 break;
317 case HCI_ACLDATA_PKT:
318 if (bt_cb(skb)->incoming)
dcf4adbf 319 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
cd82e61c 320 else
dcf4adbf 321 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
cd82e61c
MH
322 break;
323 case HCI_SCODATA_PKT:
324 if (bt_cb(skb)->incoming)
dcf4adbf 325 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
cd82e61c 326 else
dcf4adbf 327 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
cd82e61c 328 break;
f9a619db
LAD
329 case HCI_ISODATA_PKT:
330 if (bt_cb(skb)->incoming)
331 opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
332 else
333 opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
334 break;
e875ff84
MH
335 case HCI_DIAG_PKT:
336 opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
337 break;
cd82e61c
MH
338 default:
339 return;
340 }
341
2b531294
MH
342 /* Create a private copy with headroom */
343 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
344 if (!skb_copy)
345 return;
346
347 /* Put header before the data */
d58ff351 348 hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
2b531294
MH
349 hdr->opcode = opcode;
350 hdr->index = cpu_to_le16(hdev->id);
351 hdr->len = cpu_to_le16(skb->len);
352
c08b1a1d
MH
353 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
354 HCI_SOCK_TRUSTED, NULL);
cd82e61c
MH
355 kfree_skb(skb_copy);
356}
357
38ceaa00
MH
358void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
359 void *data, u16 data_len, ktime_t tstamp,
360 int flag, struct sock *skip_sk)
361{
362 struct sock *sk;
363 __le16 index;
364
365 if (hdev)
366 index = cpu_to_le16(hdev->id);
367 else
368 index = cpu_to_le16(MGMT_INDEX_NONE);
369
370 read_lock(&hci_sk_list.lock);
371
372 sk_for_each(sk, &hci_sk_list.head) {
373 struct hci_mon_hdr *hdr;
374 struct sk_buff *skb;
375
376 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
377 continue;
378
379 /* Ignore socket without the flag set */
380 if (!hci_sock_test_flag(sk, flag))
381 continue;
382
383 /* Skip the original socket */
384 if (sk == skip_sk)
385 continue;
386
387 skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
388 if (!skb)
389 continue;
390
391 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
392 put_unaligned_le16(event, skb_put(skb, 2));
393
394 if (data)
59ae1d12 395 skb_put_data(skb, data, data_len);
38ceaa00
MH
396
397 skb->tstamp = tstamp;
398
d58ff351 399 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
38ceaa00
MH
400 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
401 hdr->index = index;
402 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
403
a9ee77af
SAS
404 __hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
405 HCI_SOCK_TRUSTED, NULL);
38ceaa00
MH
406 kfree_skb(skb);
407 }
408
409 read_unlock(&hci_sk_list.lock);
410}
411
cd82e61c
MH
412static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
413{
414 struct hci_mon_hdr *hdr;
415 struct hci_mon_new_index *ni;
6c566dd5 416 struct hci_mon_index_info *ii;
cd82e61c
MH
417 struct sk_buff *skb;
418 __le16 opcode;
419
420 switch (event) {
421 case HCI_DEV_REG:
422 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
423 if (!skb)
424 return NULL;
425
4df864c1 426 ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
cd82e61c
MH
427 ni->type = hdev->dev_type;
428 ni->bus = hdev->bus;
429 bacpy(&ni->bdaddr, &hdev->bdaddr);
430 memcpy(ni->name, hdev->name, 8);
431
dcf4adbf 432 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
cd82e61c
MH
433 break;
434
435 case HCI_DEV_UNREG:
436 skb = bt_skb_alloc(0, GFP_ATOMIC);
437 if (!skb)
438 return NULL;
439
dcf4adbf 440 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
cd82e61c
MH
441 break;
442
e131d74a
MH
443 case HCI_DEV_SETUP:
444 if (hdev->manufacturer == 0xffff)
445 return NULL;
446
447 /* fall through */
448
6c566dd5
MH
449 case HCI_DEV_UP:
450 skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
451 if (!skb)
452 return NULL;
453
4df864c1 454 ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
6c566dd5
MH
455 bacpy(&ii->bdaddr, &hdev->bdaddr);
456 ii->manufacturer = cpu_to_le16(hdev->manufacturer);
457
458 opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
459 break;
460
22db3cbc
MH
461 case HCI_DEV_OPEN:
462 skb = bt_skb_alloc(0, GFP_ATOMIC);
463 if (!skb)
464 return NULL;
465
466 opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
467 break;
468
469 case HCI_DEV_CLOSE:
470 skb = bt_skb_alloc(0, GFP_ATOMIC);
471 if (!skb)
472 return NULL;
473
474 opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
475 break;
476
cd82e61c
MH
477 default:
478 return NULL;
479 }
480
481 __net_timestamp(skb);
482
d58ff351 483 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
cd82e61c
MH
484 hdr->opcode = opcode;
485 hdr->index = cpu_to_le16(hdev->id);
486 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
487
488 return skb;
489}
490
249fa169
MH
491static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
492{
493 struct hci_mon_hdr *hdr;
494 struct sk_buff *skb;
d0bef1d2 495 u16 format;
249fa169
MH
496 u8 ver[3];
497 u32 flags;
498
0ef2c42f
MH
499 /* No message needed when cookie is not present */
500 if (!hci_pi(sk)->cookie)
501 return NULL;
502
d0bef1d2 503 switch (hci_pi(sk)->channel) {
f81f5b2d
MH
504 case HCI_CHANNEL_RAW:
505 format = 0x0000;
506 ver[0] = BT_SUBSYS_VERSION;
507 put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
508 break;
aa1638dd
MH
509 case HCI_CHANNEL_USER:
510 format = 0x0001;
511 ver[0] = BT_SUBSYS_VERSION;
512 put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
513 break;
d0bef1d2
MH
514 case HCI_CHANNEL_CONTROL:
515 format = 0x0002;
516 mgmt_fill_version_info(ver);
517 break;
518 default:
519 /* No message for unsupported format */
520 return NULL;
521 }
522
249fa169
MH
 523 skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
524 if (!skb)
525 return NULL;
526
249fa169
MH
527 flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
528
529 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
530 put_unaligned_le16(format, skb_put(skb, 2));
59ae1d12 531 skb_put_data(skb, ver, sizeof(ver));
249fa169 532 put_unaligned_le32(flags, skb_put(skb, 4));
634fef61 533 skb_put_u8(skb, TASK_COMM_LEN);
59ae1d12 534 skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
249fa169
MH
535
536 __net_timestamp(skb);
537
d58ff351 538 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
249fa169 539 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
0ef2c42f
MH
540 if (hci_pi(sk)->hdev)
541 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
542 else
543 hdr->index = cpu_to_le16(HCI_DEV_NONE);
249fa169
MH
544 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
545
546 return skb;
547}
548
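/* The HCI_MON_CTRL_OPEN payload built above consists of the 4 byte
 * socket cookie, a 2 byte format identifier (0x0000 raw, 0x0001 user
 * channel, 0x0002 control), 3 bytes of version information, 4 bytes
 * of flags (bit 0 = trusted) and the task name as a length prefixed
 * string of TASK_COMM_LEN bytes.
 */
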
549static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
550{
551 struct hci_mon_hdr *hdr;
552 struct sk_buff *skb;
553
0ef2c42f
MH
554 /* No message needed when cookie is not present */
555 if (!hci_pi(sk)->cookie)
556 return NULL;
557
d0bef1d2 558 switch (hci_pi(sk)->channel) {
f81f5b2d 559 case HCI_CHANNEL_RAW:
aa1638dd 560 case HCI_CHANNEL_USER:
d0bef1d2
MH
561 case HCI_CHANNEL_CONTROL:
562 break;
563 default:
564 /* No message for unsupported format */
565 return NULL;
566 }
567
249fa169
MH
568 skb = bt_skb_alloc(4, GFP_ATOMIC);
569 if (!skb)
570 return NULL;
571
572 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
573
574 __net_timestamp(skb);
575
d58ff351 576 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
249fa169 577 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
0ef2c42f
MH
578 if (hci_pi(sk)->hdev)
579 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
580 else
581 hdr->index = cpu_to_le16(HCI_DEV_NONE);
249fa169
MH
582 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
583
584 return skb;
585}
586
38ceaa00
MH
587static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
588 u16 opcode, u16 len,
589 const void *buf)
590{
591 struct hci_mon_hdr *hdr;
592 struct sk_buff *skb;
593
594 skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
595 if (!skb)
596 return NULL;
597
598 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
599 put_unaligned_le16(opcode, skb_put(skb, 2));
600
601 if (buf)
59ae1d12 602 skb_put_data(skb, buf, len);
38ceaa00
MH
603
604 __net_timestamp(skb);
605
d58ff351 606 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
38ceaa00
MH
607 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
608 hdr->index = cpu_to_le16(index);
609 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
610
611 return skb;
612}
613
787b306c
JB
614static void __printf(2, 3)
615send_monitor_note(struct sock *sk, const char *fmt, ...)
dd31506d 616{
787b306c 617 size_t len;
dd31506d
MH
618 struct hci_mon_hdr *hdr;
619 struct sk_buff *skb;
787b306c
JB
620 va_list args;
621
622 va_start(args, fmt);
623 len = vsnprintf(NULL, 0, fmt, args);
624 va_end(args);
dd31506d
MH
625
626 skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
627 if (!skb)
628 return;
629
787b306c
JB
630 va_start(args, fmt);
631 vsprintf(skb_put(skb, len), fmt, args);
4df864c1 632 *(u8 *)skb_put(skb, 1) = 0;
787b306c 633 va_end(args);
dd31506d
MH
634
635 __net_timestamp(skb);
636
637 hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
638 hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
639 hdr->index = cpu_to_le16(HCI_DEV_NONE);
640 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
641
642 if (sock_queue_rcv_skb(sk, skb))
643 kfree_skb(skb);
644}
645
cd82e61c
MH
646static void send_monitor_replay(struct sock *sk)
647{
648 struct hci_dev *hdev;
649
650 read_lock(&hci_dev_list_lock);
651
652 list_for_each_entry(hdev, &hci_dev_list, list) {
653 struct sk_buff *skb;
654
655 skb = create_monitor_event(hdev, HCI_DEV_REG);
656 if (!skb)
657 continue;
658
659 if (sock_queue_rcv_skb(sk, skb))
660 kfree_skb(skb);
22db3cbc
MH
661
662 if (!test_bit(HCI_RUNNING, &hdev->flags))
663 continue;
664
665 skb = create_monitor_event(hdev, HCI_DEV_OPEN);
666 if (!skb)
667 continue;
668
669 if (sock_queue_rcv_skb(sk, skb))
670 kfree_skb(skb);
6c566dd5 671
e131d74a
MH
672 if (test_bit(HCI_UP, &hdev->flags))
673 skb = create_monitor_event(hdev, HCI_DEV_UP);
674 else if (hci_dev_test_flag(hdev, HCI_SETUP))
675 skb = create_monitor_event(hdev, HCI_DEV_SETUP);
676 else
677 skb = NULL;
6c566dd5 678
e131d74a
MH
679 if (skb) {
680 if (sock_queue_rcv_skb(sk, skb))
681 kfree_skb(skb);
682 }
cd82e61c
MH
683 }
684
685 read_unlock(&hci_dev_list_lock);
686}
687
249fa169
MH
688static void send_monitor_control_replay(struct sock *mon_sk)
689{
690 struct sock *sk;
691
692 read_lock(&hci_sk_list.lock);
693
694 sk_for_each(sk, &hci_sk_list.head) {
695 struct sk_buff *skb;
696
249fa169
MH
697 skb = create_monitor_ctrl_open(sk);
698 if (!skb)
699 continue;
700
701 if (sock_queue_rcv_skb(mon_sk, skb))
702 kfree_skb(skb);
703 }
704
705 read_unlock(&hci_sk_list.lock);
706}
707
040030ef
MH
708/* Generate internal stack event */
709static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
710{
711 struct hci_event_hdr *hdr;
712 struct hci_ev_stack_internal *ev;
713 struct sk_buff *skb;
714
715 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
716 if (!skb)
717 return;
718
4df864c1 719 hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
040030ef
MH
720 hdr->evt = HCI_EV_STACK_INTERNAL;
721 hdr->plen = sizeof(*ev) + dlen;
722
4df864c1 723 ev = skb_put(skb, sizeof(*ev) + dlen);
040030ef
MH
724 ev->type = type;
725 memcpy(ev->data, data, dlen);
726
727 bt_cb(skb)->incoming = 1;
728 __net_timestamp(skb);
729
d79f34e3 730 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
040030ef
MH
731 hci_send_to_sock(hdev, skb);
732 kfree_skb(skb);
733}
734
735void hci_sock_dev_event(struct hci_dev *hdev, int event)
736{
040030ef
MH
737 BT_DBG("hdev %s event %d", hdev->name, event);
738
cd82e61c
MH
739 if (atomic_read(&monitor_promisc)) {
740 struct sk_buff *skb;
741
ed1b28a4 742 /* Send event to monitor */
cd82e61c
MH
743 skb = create_monitor_event(hdev, event);
744 if (skb) {
c08b1a1d
MH
745 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
746 HCI_SOCK_TRUSTED, NULL);
cd82e61c
MH
747 kfree_skb(skb);
748 }
749 }
750
ed1b28a4
MH
751 if (event <= HCI_DEV_DOWN) {
752 struct hci_ev_si_device ev;
753
754 /* Send event to sockets */
755 ev.event = event;
756 ev.dev_id = hdev->id;
757 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
758 }
040030ef
MH
759
760 if (event == HCI_DEV_UNREG) {
761 struct sock *sk;
040030ef
MH
762
763 /* Detach sockets from device */
764 read_lock(&hci_sk_list.lock);
b67bfe0d 765 sk_for_each(sk, &hci_sk_list.head) {
040030ef
MH
766 bh_lock_sock_nested(sk);
767 if (hci_pi(sk)->hdev == hdev) {
768 hci_pi(sk)->hdev = NULL;
769 sk->sk_err = EPIPE;
770 sk->sk_state = BT_OPEN;
771 sk->sk_state_change(sk);
772
773 hci_dev_put(hdev);
774 }
775 bh_unlock_sock(sk);
776 }
777 read_unlock(&hci_sk_list.lock);
778 }
779}
780
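/* hci_sock_dev_event() above feeds every device state change to the
 * monitor channel and, for events up to HCI_DEV_DOWN, also generates
 * the legacy HCI_EV_STACK_INTERNAL event for raw sockets. On
 * HCI_DEV_UNREG any socket still bound to the device is detached and
 * woken up with EPIPE.
 */
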
801c1e8d
JH
781static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
782{
783 struct hci_mgmt_chan *c;
784
785 list_for_each_entry(c, &mgmt_chan_list, list) {
786 if (c->channel == channel)
787 return c;
788 }
789
790 return NULL;
791}
792
793static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
794{
795 struct hci_mgmt_chan *c;
796
797 mutex_lock(&mgmt_chan_list_lock);
798 c = __hci_mgmt_chan_find(channel);
799 mutex_unlock(&mgmt_chan_list_lock);
800
801 return c;
802}
803
804int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
805{
806 if (c->channel < HCI_CHANNEL_CONTROL)
807 return -EINVAL;
808
809 mutex_lock(&mgmt_chan_list_lock);
810 if (__hci_mgmt_chan_find(c->channel)) {
811 mutex_unlock(&mgmt_chan_list_lock);
812 return -EALREADY;
813 }
814
815 list_add_tail(&c->list, &mgmt_chan_list);
816
817 mutex_unlock(&mgmt_chan_list_lock);
818
819 return 0;
820}
821EXPORT_SYMBOL(hci_mgmt_chan_register);
822
823void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
824{
825 mutex_lock(&mgmt_chan_list_lock);
826 list_del(&c->list);
827 mutex_unlock(&mgmt_chan_list_lock);
828}
829EXPORT_SYMBOL(hci_mgmt_chan_unregister);
830
1da177e4
LT
831static int hci_sock_release(struct socket *sock)
832{
833 struct sock *sk = sock->sk;
7b005bd3 834 struct hci_dev *hdev;
249fa169 835 struct sk_buff *skb;
1da177e4
LT
836
837 BT_DBG("sock %p sk %p", sock, sk);
838
839 if (!sk)
840 return 0;
841
11eb85ec
DC
842 lock_sock(sk);
843
70ecce91
MH
844 switch (hci_pi(sk)->channel) {
845 case HCI_CHANNEL_MONITOR:
cd82e61c 846 atomic_dec(&monitor_promisc);
70ecce91 847 break;
f81f5b2d 848 case HCI_CHANNEL_RAW:
aa1638dd 849 case HCI_CHANNEL_USER:
70ecce91 850 case HCI_CHANNEL_CONTROL:
249fa169
MH
851 /* Send event to monitor */
852 skb = create_monitor_ctrl_close(sk);
853 if (skb) {
854 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
855 HCI_SOCK_TRUSTED, NULL);
856 kfree_skb(skb);
857 }
858
df1cb87a 859 hci_sock_free_cookie(sk);
70ecce91
MH
860 break;
861 }
cd82e61c 862
1da177e4
LT
863 bt_sock_unlink(&hci_sk_list, sk);
864
e20a2e9c 865 hdev = hci_pi(sk)->hdev;
1da177e4 866 if (hdev) {
23500189 867 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
9332ef9d 868 /* When releasing a user channel exclusive access,
6b3cc1db
SF
869 * call hci_dev_do_close directly instead of calling
870 * hci_dev_close to ensure the exclusive access will
871 * be released and the controller brought back down.
872 *
873 * The checking of HCI_AUTO_OFF is not needed in this
874 * case since it will have been cleared already when
875 * opening the user channel.
876 */
877 hci_dev_do_close(hdev);
9380f9ea
LP
878 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
879 mgmt_index_added(hdev);
23500189
MH
880 }
881
1da177e4
LT
882 atomic_dec(&hdev->promisc);
883 hci_dev_put(hdev);
884 }
885
886 sock_orphan(sk);
887
888 skb_queue_purge(&sk->sk_receive_queue);
889 skb_queue_purge(&sk->sk_write_queue);
890
11eb85ec 891 release_sock(sk);
1da177e4
LT
892 sock_put(sk);
893 return 0;
894}
895
b2a66aad 896static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
f0358568
JH
897{
898 bdaddr_t bdaddr;
5e762444 899 int err;
f0358568
JH
900
901 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
902 return -EFAULT;
903
09fd0de5 904 hci_dev_lock(hdev);
5e762444 905
dcc36c16 906 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
5e762444 907
09fd0de5 908 hci_dev_unlock(hdev);
5e762444
AJ
909
910 return err;
f0358568
JH
911}
912
b2a66aad 913static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
f0358568
JH
914{
915 bdaddr_t bdaddr;
5e762444 916 int err;
f0358568
JH
917
918 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
919 return -EFAULT;
920
09fd0de5 921 hci_dev_lock(hdev);
5e762444 922
dcc36c16 923 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
5e762444 924
09fd0de5 925 hci_dev_unlock(hdev);
5e762444
AJ
926
927 return err;
f0358568
JH
928}
929
8e87d142 930/* Ioctls that require bound socket */
6039aa73
GP
931static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
932 unsigned long arg)
1da177e4
LT
933{
934 struct hci_dev *hdev = hci_pi(sk)->hdev;
935
936 if (!hdev)
937 return -EBADFD;
938
d7a5a11d 939 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
940 return -EBUSY;
941
d7a5a11d 942 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
fee746b0
MH
943 return -EOPNOTSUPP;
944
ca8bee5d 945 if (hdev->dev_type != HCI_PRIMARY)
5b69bef5
MH
946 return -EOPNOTSUPP;
947
1da177e4
LT
948 switch (cmd) {
949 case HCISETRAW:
950 if (!capable(CAP_NET_ADMIN))
bf5b30b8 951 return -EPERM;
db596681 952 return -EOPNOTSUPP;
1da177e4 953
1da177e4 954 case HCIGETCONNINFO:
8528d3f7 955 return hci_get_conn_info(hdev, (void __user *)arg);
40be492f
MH
956
957 case HCIGETAUTHINFO:
8528d3f7 958 return hci_get_auth_info(hdev, (void __user *)arg);
1da177e4 959
f0358568
JH
960 case HCIBLOCKADDR:
961 if (!capable(CAP_NET_ADMIN))
bf5b30b8 962 return -EPERM;
8528d3f7 963 return hci_sock_blacklist_add(hdev, (void __user *)arg);
f0358568
JH
964
965 case HCIUNBLOCKADDR:
966 if (!capable(CAP_NET_ADMIN))
bf5b30b8 967 return -EPERM;
8528d3f7 968 return hci_sock_blacklist_del(hdev, (void __user *)arg);
1da177e4 969 }
0736cfa8 970
324d36ed 971 return -ENOIOCTLCMD;
1da177e4
LT
972}
973
8fc9ced3
GP
974static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
975 unsigned long arg)
1da177e4 976{
8528d3f7 977 void __user *argp = (void __user *)arg;
0736cfa8 978 struct sock *sk = sock->sk;
1da177e4
LT
979 int err;
980
981 BT_DBG("cmd %x arg %lx", cmd, arg);
982
c1c4f956
MH
983 lock_sock(sk);
984
985 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
986 err = -EBADFD;
987 goto done;
988 }
989
f81f5b2d
MH
 990 /* When calling an ioctl on an unbound raw socket, ensure that
 991 * the monitor gets informed. The resulting event is sent only
 992 * once, guaranteed by checking whether a cookie already exists;
 993 * the socket cookie is generated only once for the lifetime of
 994 * a given socket.
 995 */
996 if (hci_sock_gen_cookie(sk)) {
997 struct sk_buff *skb;
998
999 if (capable(CAP_NET_ADMIN))
1000 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1001
1002 /* Send event to monitor */
1003 skb = create_monitor_ctrl_open(sk);
1004 if (skb) {
1005 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1006 HCI_SOCK_TRUSTED, NULL);
1007 kfree_skb(skb);
1008 }
1009 }
1010
c1c4f956
MH
1011 release_sock(sk);
1012
1da177e4
LT
1013 switch (cmd) {
1014 case HCIGETDEVLIST:
1015 return hci_get_dev_list(argp);
1016
1017 case HCIGETDEVINFO:
1018 return hci_get_dev_info(argp);
1019
1020 case HCIGETCONNLIST:
1021 return hci_get_conn_list(argp);
1022
1023 case HCIDEVUP:
1024 if (!capable(CAP_NET_ADMIN))
bf5b30b8 1025 return -EPERM;
1da177e4
LT
1026 return hci_dev_open(arg);
1027
1028 case HCIDEVDOWN:
1029 if (!capable(CAP_NET_ADMIN))
bf5b30b8 1030 return -EPERM;
1da177e4
LT
1031 return hci_dev_close(arg);
1032
1033 case HCIDEVRESET:
1034 if (!capable(CAP_NET_ADMIN))
bf5b30b8 1035 return -EPERM;
1da177e4
LT
1036 return hci_dev_reset(arg);
1037
1038 case HCIDEVRESTAT:
1039 if (!capable(CAP_NET_ADMIN))
bf5b30b8 1040 return -EPERM;
1da177e4
LT
1041 return hci_dev_reset_stat(arg);
1042
1043 case HCISETSCAN:
1044 case HCISETAUTH:
1045 case HCISETENCRYPT:
1046 case HCISETPTYPE:
1047 case HCISETLINKPOL:
1048 case HCISETLINKMODE:
1049 case HCISETACLMTU:
1050 case HCISETSCOMTU:
1051 if (!capable(CAP_NET_ADMIN))
bf5b30b8 1052 return -EPERM;
1da177e4
LT
1053 return hci_dev_cmd(cmd, argp);
1054
1055 case HCIINQUIRY:
1056 return hci_inquiry(argp);
1da177e4 1057 }
c1c4f956
MH
1058
1059 lock_sock(sk);
1060
1061 err = hci_sock_bound_ioctl(sk, cmd, arg);
1062
1063done:
1064 release_sock(sk);
1065 return err;
1da177e4
LT
1066}
1067
7a6038b3
AB
1068#ifdef CONFIG_COMPAT
1069static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1070 unsigned long arg)
1071{
1072 switch (cmd) {
1073 case HCIDEVUP:
1074 case HCIDEVDOWN:
1075 case HCIDEVRESET:
1076 case HCIDEVRESTAT:
1077 return hci_sock_ioctl(sock, cmd, arg);
1078 }
1079
1080 return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1081}
1082#endif
1083
8fc9ced3
GP
1084static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1085 int addr_len)
1da177e4 1086{
0381101f 1087 struct sockaddr_hci haddr;
1da177e4
LT
1088 struct sock *sk = sock->sk;
1089 struct hci_dev *hdev = NULL;
f4cdbb3f 1090 struct sk_buff *skb;
0381101f 1091 int len, err = 0;
1da177e4
LT
1092
1093 BT_DBG("sock %p sk %p", sock, sk);
1094
0381101f
JH
1095 if (!addr)
1096 return -EINVAL;
1097
1098 memset(&haddr, 0, sizeof(haddr));
1099 len = min_t(unsigned int, sizeof(haddr), addr_len);
1100 memcpy(&haddr, addr, len);
1101
1102 if (haddr.hci_family != AF_BLUETOOTH)
1103 return -EINVAL;
1104
1da177e4
LT
1105 lock_sock(sk);
1106
7cc2ade2 1107 if (sk->sk_state == BT_BOUND) {
1da177e4
LT
1108 err = -EALREADY;
1109 goto done;
1110 }
1111
7cc2ade2
MH
1112 switch (haddr.hci_channel) {
1113 case HCI_CHANNEL_RAW:
1114 if (hci_pi(sk)->hdev) {
1115 err = -EALREADY;
1da177e4
LT
1116 goto done;
1117 }
1118
7cc2ade2
MH
1119 if (haddr.hci_dev != HCI_DEV_NONE) {
1120 hdev = hci_dev_get(haddr.hci_dev);
1121 if (!hdev) {
1122 err = -ENODEV;
1123 goto done;
1124 }
1125
1126 atomic_inc(&hdev->promisc);
1127 }
1128
5a6d2cf5 1129 hci_pi(sk)->channel = haddr.hci_channel;
f81f5b2d 1130
f4cdbb3f
MH
1131 if (!hci_sock_gen_cookie(sk)) {
 1132 /* If a cookie has already been assigned, an ioctl was
 1133 * already issued against the unbound socket, which
 1134 * triggered an open notification. Send a close
 1135 * notification first to allow the state transition to
 1136 * bound.
 1137 */
1138 skb = create_monitor_ctrl_close(sk);
f81f5b2d
MH
1139 if (skb) {
1140 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1141 HCI_SOCK_TRUSTED, NULL);
1142 kfree_skb(skb);
1143 }
1144 }
f4cdbb3f
MH
1145
1146 if (capable(CAP_NET_ADMIN))
1147 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1148
1149 hci_pi(sk)->hdev = hdev;
1150
1151 /* Send event to monitor */
1152 skb = create_monitor_ctrl_open(sk);
1153 if (skb) {
1154 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1155 HCI_SOCK_TRUSTED, NULL);
1156 kfree_skb(skb);
1157 }
7cc2ade2
MH
1158 break;
1159
23500189
MH
1160 case HCI_CHANNEL_USER:
1161 if (hci_pi(sk)->hdev) {
1162 err = -EALREADY;
1163 goto done;
1164 }
1165
1166 if (haddr.hci_dev == HCI_DEV_NONE) {
1167 err = -EINVAL;
1168 goto done;
1169 }
1170
10a8b86f 1171 if (!capable(CAP_NET_ADMIN)) {
23500189
MH
1172 err = -EPERM;
1173 goto done;
1174 }
1175
1176 hdev = hci_dev_get(haddr.hci_dev);
1177 if (!hdev) {
1178 err = -ENODEV;
1179 goto done;
1180 }
1181
781f899f 1182 if (test_bit(HCI_INIT, &hdev->flags) ||
d7a5a11d 1183 hci_dev_test_flag(hdev, HCI_SETUP) ||
781f899f
MH
1184 hci_dev_test_flag(hdev, HCI_CONFIG) ||
1185 (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1186 test_bit(HCI_UP, &hdev->flags))) {
23500189
MH
1187 err = -EBUSY;
1188 hci_dev_put(hdev);
1189 goto done;
1190 }
1191
238be788 1192 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
23500189
MH
1193 err = -EUSERS;
1194 hci_dev_put(hdev);
1195 goto done;
1196 }
1197
0602a8ad 1198 mgmt_index_removed(hdev);
23500189
MH
1199
1200 err = hci_dev_open(hdev->id);
1201 if (err) {
781f899f
MH
1202 if (err == -EALREADY) {
1203 /* In case the transport is already up and
1204 * running, clear the error here.
1205 *
9332ef9d 1206 * This can happen when opening a user
781f899f
MH
 1207 * channel while the HCI_AUTO_OFF grace period
1208 * is still active.
1209 */
1210 err = 0;
1211 } else {
1212 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1213 mgmt_index_added(hdev);
1214 hci_dev_put(hdev);
1215 goto done;
1216 }
23500189
MH
1217 }
1218
5a6d2cf5 1219 hci_pi(sk)->channel = haddr.hci_channel;
aa1638dd
MH
1220
1221 if (!hci_sock_gen_cookie(sk)) {
1222 /* In the case when a cookie has already been assigned,
1223 * this socket will transition from a raw socket into
9332ef9d 1224 * a user channel socket. For a clean transition, send
aa1638dd
MH
1225 * the close notification first.
1226 */
1227 skb = create_monitor_ctrl_close(sk);
1228 if (skb) {
1229 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1230 HCI_SOCK_TRUSTED, NULL);
1231 kfree_skb(skb);
1232 }
1233 }
1234
1235 /* The user channel is restricted to CAP_NET_ADMIN
 1236 * capabilities and is therefore implicitly trusted.
1237 */
1238 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1239
23500189 1240 hci_pi(sk)->hdev = hdev;
5a6d2cf5 1241
aa1638dd
MH
1242 /* Send event to monitor */
1243 skb = create_monitor_ctrl_open(sk);
1244 if (skb) {
1245 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1246 HCI_SOCK_TRUSTED, NULL);
1247 kfree_skb(skb);
1248 }
1249
5a6d2cf5 1250 atomic_inc(&hdev->promisc);
23500189
MH
1251 break;
1252
cd82e61c
MH
1253 case HCI_CHANNEL_MONITOR:
1254 if (haddr.hci_dev != HCI_DEV_NONE) {
1255 err = -EINVAL;
1256 goto done;
1257 }
1258
1259 if (!capable(CAP_NET_RAW)) {
1260 err = -EPERM;
1261 goto done;
1262 }
1263
5a6d2cf5
MH
1264 hci_pi(sk)->channel = haddr.hci_channel;
1265
50ebc055
MH
1266 /* The monitor interface is restricted to CAP_NET_RAW
1267 * capabilities and with that implicitly trusted.
1268 */
1269 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1270
787b306c
JB
1271 send_monitor_note(sk, "Linux version %s (%s)",
1272 init_utsname()->release,
1273 init_utsname()->machine);
9e8305b3
MH
1274 send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1275 BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
cd82e61c 1276 send_monitor_replay(sk);
249fa169 1277 send_monitor_control_replay(sk);
cd82e61c
MH
1278
1279 atomic_inc(&monitor_promisc);
1280 break;
1281
ac714949
MH
1282 case HCI_CHANNEL_LOGGING:
1283 if (haddr.hci_dev != HCI_DEV_NONE) {
1284 err = -EINVAL;
1285 goto done;
1286 }
1287
1288 if (!capable(CAP_NET_ADMIN)) {
1289 err = -EPERM;
1290 goto done;
1291 }
5a6d2cf5
MH
1292
1293 hci_pi(sk)->channel = haddr.hci_channel;
ac714949
MH
1294 break;
1295
7cc2ade2 1296 default:
801c1e8d
JH
1297 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1298 err = -EINVAL;
1299 goto done;
1300 }
1301
1302 if (haddr.hci_dev != HCI_DEV_NONE) {
1303 err = -EINVAL;
1304 goto done;
1305 }
1306
1195fbb8
MH
1307 /* Users with CAP_NET_ADMIN capabilities are allowed
1308 * access to all management commands and events. For
1309 * untrusted users the interface is restricted and
1310 * also only untrusted events are sent.
50ebc055 1311 */
1195fbb8
MH
1312 if (capable(CAP_NET_ADMIN))
1313 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
50ebc055 1314
5a6d2cf5
MH
1315 hci_pi(sk)->channel = haddr.hci_channel;
1316
f9207338
MH
1317 /* At the moment the index and unconfigured index events
1318 * are enabled unconditionally. Setting them on each
1319 * socket when binding keeps this functionality. They
 1320 * may however be cleared later, and sending of these
 1321 * events is then intentionally disabled.
f6b7712e
MH
1322 *
1323 * This also enables generic events that are safe to be
1324 * received by untrusted users. Example for such events
1325 * are changes to settings, class of device, name etc.
f9207338 1326 */
5a6d2cf5 1327 if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
f4cdbb3f
MH
1328 if (!hci_sock_gen_cookie(sk)) {
 1329 /* In the case when a cookie has already been
 1330 * assigned, this socket will transition from
 1331 * a raw socket into a control socket. To
 1332 * allow for a clean transition, send the
 1333 * close notification first.
1334 */
1335 skb = create_monitor_ctrl_close(sk);
1336 if (skb) {
1337 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1338 HCI_SOCK_TRUSTED, NULL);
1339 kfree_skb(skb);
1340 }
1341 }
70ecce91 1342
249fa169
MH
1343 /* Send event to monitor */
1344 skb = create_monitor_ctrl_open(sk);
1345 if (skb) {
1346 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1347 HCI_SOCK_TRUSTED, NULL);
1348 kfree_skb(skb);
1349 }
1350
f9207338
MH
1351 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1352 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
5504c3a3
MH
1353 hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1354 hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1355 hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1356 hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
f9207338 1357 }
801c1e8d 1358 break;
1da177e4
LT
1359 }
1360
1da177e4
LT
1361 sk->sk_state = BT_BOUND;
1362
1363done:
1364 release_sock(sk);
1365 return err;
1366}
1367
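/* Channel semantics enforced by hci_sock_bind() above:
 * HCI_CHANNEL_RAW may be bound to a single device or left unbound,
 * HCI_CHANNEL_USER takes exclusive control of one device and requires
 * CAP_NET_ADMIN, HCI_CHANNEL_MONITOR is read-only and requires
 * CAP_NET_RAW, HCI_CHANNEL_LOGGING is write-only and requires
 * CAP_NET_ADMIN, and any other value must match a registered
 * management channel. Each bind of a traced channel is announced to
 * the monitor with a CTRL_OPEN message.
 */
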
8fc9ced3 1368static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
9b2c45d4 1369 int peer)
1da177e4 1370{
8528d3f7 1371 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1da177e4 1372 struct sock *sk = sock->sk;
9d4b68b2
MH
1373 struct hci_dev *hdev;
1374 int err = 0;
1da177e4
LT
1375
1376 BT_DBG("sock %p sk %p", sock, sk);
1377
06f43cbc
MH
1378 if (peer)
1379 return -EOPNOTSUPP;
1380
1da177e4
LT
1381 lock_sock(sk);
1382
9d4b68b2
MH
1383 hdev = hci_pi(sk)->hdev;
1384 if (!hdev) {
1385 err = -EBADFD;
1386 goto done;
1387 }
1388
1da177e4 1389 haddr->hci_family = AF_BLUETOOTH;
7b005bd3 1390 haddr->hci_dev = hdev->id;
9d4b68b2 1391 haddr->hci_channel = hci_pi(sk)->channel;
9b2c45d4 1392 err = sizeof(*haddr);
1da177e4 1393
9d4b68b2 1394done:
1da177e4 1395 release_sock(sk);
9d4b68b2 1396 return err;
1da177e4
LT
1397}
1398
6039aa73
GP
1399static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1400 struct sk_buff *skb)
1da177e4
LT
1401{
1402 __u32 mask = hci_pi(sk)->cmsg_mask;
1403
0d48d939
MH
1404 if (mask & HCI_CMSG_DIR) {
1405 int incoming = bt_cb(skb)->incoming;
8fc9ced3
GP
1406 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1407 &incoming);
0d48d939 1408 }
1da177e4 1409
a61bbcf2 1410 if (mask & HCI_CMSG_TSTAMP) {
f6e623a6 1411#ifdef CONFIG_COMPAT
13c6ee2a 1412 struct old_timeval32 ctv;
f6e623a6 1413#endif
13c6ee2a 1414 struct __kernel_old_timeval tv;
767c5eb5
MH
1415 void *data;
1416 int len;
a61bbcf2
PM
1417
1418 skb_get_timestamp(skb, &tv);
767c5eb5 1419
1da97f83
DM
1420 data = &tv;
1421 len = sizeof(tv);
1422#ifdef CONFIG_COMPAT
da88cea1
L
1423 if (!COMPAT_USE_64BIT_TIME &&
1424 (msg->msg_flags & MSG_CMSG_COMPAT)) {
767c5eb5
MH
1425 ctv.tv_sec = tv.tv_sec;
1426 ctv.tv_usec = tv.tv_usec;
1427 data = &ctv;
1428 len = sizeof(ctv);
767c5eb5 1429 }
1da97f83 1430#endif
767c5eb5
MH
1431
1432 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
a61bbcf2 1433 }
1da177e4 1434}
8e87d142 1435
8528d3f7
MH
1436static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1437 size_t len, int flags)
1da177e4
LT
1438{
1439 int noblock = flags & MSG_DONTWAIT;
1440 struct sock *sk = sock->sk;
1441 struct sk_buff *skb;
1442 int copied, err;
83871f8c 1443 unsigned int skblen;
1da177e4
LT
1444
1445 BT_DBG("sock %p, sk %p", sock, sk);
1446
d94a6104 1447 if (flags & MSG_OOB)
1da177e4
LT
1448 return -EOPNOTSUPP;
1449
ac714949
MH
1450 if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1451 return -EOPNOTSUPP;
1452
1da177e4
LT
1453 if (sk->sk_state == BT_CLOSED)
1454 return 0;
1455
70f23020
AE
1456 skb = skb_recv_datagram(sk, flags, noblock, &err);
1457 if (!skb)
1da177e4
LT
1458 return err;
1459
83871f8c 1460 skblen = skb->len;
1da177e4
LT
1461 copied = skb->len;
1462 if (len < copied) {
1463 msg->msg_flags |= MSG_TRUNC;
1464 copied = len;
1465 }
1466
badff6d0 1467 skb_reset_transport_header(skb);
51f3d02b 1468 err = skb_copy_datagram_msg(skb, 0, msg, copied);
1da177e4 1469
3a208627
MH
1470 switch (hci_pi(sk)->channel) {
1471 case HCI_CHANNEL_RAW:
1472 hci_sock_cmsg(sk, msg, skb);
1473 break;
23500189 1474 case HCI_CHANNEL_USER:
cd82e61c
MH
1475 case HCI_CHANNEL_MONITOR:
1476 sock_recv_timestamp(msg, sk, skb);
1477 break;
801c1e8d
JH
1478 default:
1479 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1480 sock_recv_timestamp(msg, sk, skb);
1481 break;
3a208627 1482 }
1da177e4
LT
1483
1484 skb_free_datagram(sk, skb);
1485
4f34228b 1486 if (flags & MSG_TRUNC)
83871f8c
DK
1487 copied = skblen;
1488
1da177e4
LT
1489 return err ? : copied;
1490}
1491
fa4335d7
JH
1492static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1493 struct msghdr *msg, size_t msglen)
1494{
1495 void *buf;
1496 u8 *cp;
1497 struct mgmt_hdr *hdr;
1498 u16 opcode, index, len;
1499 struct hci_dev *hdev = NULL;
1500 const struct hci_mgmt_handler *handler;
1501 bool var_len, no_hdev;
1502 int err;
1503
1504 BT_DBG("got %zu bytes", msglen);
1505
1506 if (msglen < sizeof(*hdr))
1507 return -EINVAL;
1508
1509 buf = kmalloc(msglen, GFP_KERNEL);
1510 if (!buf)
1511 return -ENOMEM;
1512
1513 if (memcpy_from_msg(buf, msg, msglen)) {
1514 err = -EFAULT;
1515 goto done;
1516 }
1517
1518 hdr = buf;
1519 opcode = __le16_to_cpu(hdr->opcode);
1520 index = __le16_to_cpu(hdr->index);
1521 len = __le16_to_cpu(hdr->len);
1522
1523 if (len != msglen - sizeof(*hdr)) {
1524 err = -EINVAL;
1525 goto done;
1526 }
1527
38ceaa00
MH
1528 if (chan->channel == HCI_CHANNEL_CONTROL) {
1529 struct sk_buff *skb;
1530
1531 /* Send event to monitor */
1532 skb = create_monitor_ctrl_command(sk, index, opcode, len,
1533 buf + sizeof(*hdr));
1534 if (skb) {
1535 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1536 HCI_SOCK_TRUSTED, NULL);
1537 kfree_skb(skb);
1538 }
1539 }
1540
fa4335d7
JH
1541 if (opcode >= chan->handler_count ||
1542 chan->handlers[opcode].func == NULL) {
1543 BT_DBG("Unknown op %u", opcode);
1544 err = mgmt_cmd_status(sk, index, opcode,
1545 MGMT_STATUS_UNKNOWN_COMMAND);
1546 goto done;
1547 }
1548
1549 handler = &chan->handlers[opcode];
1550
1551 if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1552 !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1553 err = mgmt_cmd_status(sk, index, opcode,
1554 MGMT_STATUS_PERMISSION_DENIED);
1555 goto done;
1556 }
1557
1558 if (index != MGMT_INDEX_NONE) {
1559 hdev = hci_dev_get(index);
1560 if (!hdev) {
1561 err = mgmt_cmd_status(sk, index, opcode,
1562 MGMT_STATUS_INVALID_INDEX);
1563 goto done;
1564 }
1565
1566 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1567 hci_dev_test_flag(hdev, HCI_CONFIG) ||
1568 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1569 err = mgmt_cmd_status(sk, index, opcode,
1570 MGMT_STATUS_INVALID_INDEX);
1571 goto done;
1572 }
1573
1574 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1575 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1576 err = mgmt_cmd_status(sk, index, opcode,
1577 MGMT_STATUS_INVALID_INDEX);
1578 goto done;
1579 }
1580 }
1581
1582 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1583 if (no_hdev != !hdev) {
1584 err = mgmt_cmd_status(sk, index, opcode,
1585 MGMT_STATUS_INVALID_INDEX);
1586 goto done;
1587 }
1588
1589 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1590 if ((var_len && len < handler->data_len) ||
1591 (!var_len && len != handler->data_len)) {
1592 err = mgmt_cmd_status(sk, index, opcode,
1593 MGMT_STATUS_INVALID_PARAMS);
1594 goto done;
1595 }
1596
1597 if (hdev && chan->hdev_init)
1598 chan->hdev_init(sk, hdev);
1599
1600 cp = buf + sizeof(*hdr);
1601
1602 err = handler->func(sk, hdev, cp, len);
1603 if (err < 0)
1604 goto done;
1605
1606 err = msglen;
1607
1608done:
1609 if (hdev)
1610 hci_dev_put(hdev);
1611
1612 kfree(buf);
1613 return err;
1614}
1615
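/* Management command dispatch above: the mgmt_hdr is parsed, the
 * command is mirrored to the monitor as HCI_MON_CTRL_COMMAND, the
 * handler table of the channel is consulted, trust and fixed/variable
 * length checks are applied, the optional controller index is
 * resolved to a hci_dev reference and finally the handler is invoked
 * with the command parameters.
 */
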
ac714949
MH
1616static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1617{
1618 struct hci_mon_hdr *hdr;
1619 struct sk_buff *skb;
1620 struct hci_dev *hdev;
1621 u16 index;
1622 int err;
1623
1624 /* The logging frame consists at minimum of the standard header,
1625 * the priority byte, the ident length byte and at least one string
 1626 * terminator NUL byte. Anything shorter is an invalid packet.
1627 */
1628 if (len < sizeof(*hdr) + 3)
1629 return -EINVAL;
1630
1631 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1632 if (!skb)
1633 return err;
1634
1635 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1636 err = -EFAULT;
1637 goto drop;
1638 }
1639
1640 hdr = (void *)skb->data;
1641
1642 if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1643 err = -EINVAL;
1644 goto drop;
1645 }
1646
1647 if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1648 __u8 priority = skb->data[sizeof(*hdr)];
1649 __u8 ident_len = skb->data[sizeof(*hdr) + 1];
1650
 1651 /* Only the priorities 0-7 are valid; any other value
 1652 * results in an invalid packet.
1653 *
1654 * The priority byte is followed by an ident length byte and
1655 * the NUL terminated ident string. Check that the ident
1656 * length is not overflowing the packet and also that the
1657 * ident string itself is NUL terminated. In case the ident
1658 * length is zero, the length value actually doubles as NUL
1659 * terminator identifier.
1660 *
1661 * The message follows the ident string (if present) and
1662 * must be NUL terminated. Otherwise it is not a valid packet.
1663 */
1664 if (priority > 7 || skb->data[len - 1] != 0x00 ||
1665 ident_len > len - sizeof(*hdr) - 3 ||
1666 skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1667 err = -EINVAL;
1668 goto drop;
1669 }
1670 } else {
1671 err = -EINVAL;
1672 goto drop;
1673 }
1674
1675 index = __le16_to_cpu(hdr->index);
1676
1677 if (index != MGMT_INDEX_NONE) {
1678 hdev = hci_dev_get(index);
1679 if (!hdev) {
1680 err = -ENODEV;
1681 goto drop;
1682 }
1683 } else {
1684 hdev = NULL;
1685 }
1686
1687 hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1688
1689 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1690 err = len;
1691
1692 if (hdev)
1693 hci_dev_put(hdev);
1694
1695drop:
1696 kfree_skb(skb);
1697 return err;
1698}
1699
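/* A logging frame accepted above therefore has the following layout
 * (multi-byte fields little endian):
 *
 *   struct hci_mon_hdr  opcode=0x0000, index, len
 *   __u8                priority (0-7)
 *   __u8                ident_len
 *   char                ident[ident_len], NUL terminated (may be empty)
 *   char                message[], NUL terminated
 *
 * The opcode is rewritten to HCI_MON_USER_LOGGING before the frame is
 * forwarded to the monitor channel.
 */
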
1b784140
YX
1700static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1701 size_t len)
1da177e4
LT
1702{
1703 struct sock *sk = sock->sk;
801c1e8d 1704 struct hci_mgmt_chan *chan;
1da177e4
LT
1705 struct hci_dev *hdev;
1706 struct sk_buff *skb;
1707 int err;
1708
1709 BT_DBG("sock %p sk %p", sock, sk);
1710
1711 if (msg->msg_flags & MSG_OOB)
1712 return -EOPNOTSUPP;
1713
ab89f0bd
SJ
1714 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
1715 MSG_CMSG_COMPAT))
1da177e4
LT
1716 return -EINVAL;
1717
1718 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1719 return -EINVAL;
1720
1721 lock_sock(sk);
1722
0381101f
JH
1723 switch (hci_pi(sk)->channel) {
1724 case HCI_CHANNEL_RAW:
23500189 1725 case HCI_CHANNEL_USER:
0381101f 1726 break;
cd82e61c
MH
1727 case HCI_CHANNEL_MONITOR:
1728 err = -EOPNOTSUPP;
1729 goto done;
ac714949
MH
1730 case HCI_CHANNEL_LOGGING:
1731 err = hci_logging_frame(sk, msg, len);
1732 goto done;
0381101f 1733 default:
801c1e8d
JH
1734 mutex_lock(&mgmt_chan_list_lock);
1735 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1736 if (chan)
fa4335d7 1737 err = hci_mgmt_cmd(chan, sk, msg, len);
801c1e8d
JH
1738 else
1739 err = -EINVAL;
1740
1741 mutex_unlock(&mgmt_chan_list_lock);
0381101f
JH
1742 goto done;
1743 }
1744
70f23020
AE
1745 hdev = hci_pi(sk)->hdev;
1746 if (!hdev) {
1da177e4
LT
1747 err = -EBADFD;
1748 goto done;
1749 }
1750
7e21addc
MH
1751 if (!test_bit(HCI_UP, &hdev->flags)) {
1752 err = -ENETDOWN;
1753 goto done;
1754 }
1755
70f23020
AE
1756 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1757 if (!skb)
1da177e4
LT
1758 goto done;
1759
6ce8e9ce 1760 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1da177e4
LT
1761 err = -EFAULT;
1762 goto drop;
1763 }
1764
8528d3f7 1765 hci_skb_pkt_type(skb) = skb->data[0];
1da177e4 1766 skb_pull(skb, 1);
1da177e4 1767
1bc5ad16
MH
1768 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1769 /* No permission check is needed for user channel
1770 * since that gets enforced when binding the socket.
1771 *
1772 * However check that the packet type is valid.
1773 */
d79f34e3
MH
1774 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1775 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
cc974003
MH
1776 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1777 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1bc5ad16
MH
1778 err = -EINVAL;
1779 goto drop;
1780 }
1781
1782 skb_queue_tail(&hdev->raw_q, skb);
1783 queue_work(hdev->workqueue, &hdev->tx_work);
d79f34e3 1784 } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
83985319 1785 u16 opcode = get_unaligned_le16(skb->data);
1da177e4
LT
1786 u16 ogf = hci_opcode_ogf(opcode);
1787 u16 ocf = hci_opcode_ocf(opcode);
1788
1789 if (((ogf > HCI_SFLT_MAX_OGF) ||
3bb3c755
GP
1790 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1791 &hci_sec_filter.ocf_mask[ogf])) &&
1792 !capable(CAP_NET_RAW)) {
1da177e4
LT
1793 err = -EPERM;
1794 goto drop;
1795 }
1796
1982162b
MH
1797 /* Since the opcode has already been extracted here, store
1798 * a copy of the value for later use by the drivers.
1799 */
1800 hci_skb_opcode(skb) = opcode;
1801
fee746b0 1802 if (ogf == 0x3f) {
1da177e4 1803 skb_queue_tail(&hdev->raw_q, skb);
3eff45ea 1804 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 1805 } else {
49c922bb 1806 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
1807 * single-command requests.
1808 */
44d27137 1809 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
11714b3d 1810
1da177e4 1811 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 1812 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
1813 }
1814 } else {
1815 if (!capable(CAP_NET_RAW)) {
1816 err = -EPERM;
1817 goto drop;
1818 }
1819
d79f34e3 1820 if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
cc974003
MH
1821 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1822 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
bb77543e
MH
1823 err = -EINVAL;
1824 goto drop;
1825 }
1826
1da177e4 1827 skb_queue_tail(&hdev->raw_q, skb);
3eff45ea 1828 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
1829 }
1830
1831 err = len;
1832
1833done:
1834 release_sock(sk);
1835 return err;
1836
1837drop:
1838 kfree_skb(skb);
1839 goto done;
1840}
1841
8fc9ced3
GP
1842static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1843 char __user *optval, unsigned int len)
1da177e4
LT
1844{
1845 struct hci_ufilter uf = { .opcode = 0 };
1846 struct sock *sk = sock->sk;
1847 int err = 0, opt = 0;
1848
1849 BT_DBG("sk %p, opt %d", sk, optname);
1850
47b0f573
MH
1851 if (level != SOL_HCI)
1852 return -ENOPROTOOPT;
1853
1da177e4
LT
1854 lock_sock(sk);
1855
2f39cdb7 1856 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
c2371e80 1857 err = -EBADFD;
2f39cdb7
MH
1858 goto done;
1859 }
1860
1da177e4
LT
1861 switch (optname) {
1862 case HCI_DATA_DIR:
1863 if (get_user(opt, (int __user *)optval)) {
1864 err = -EFAULT;
1865 break;
1866 }
1867
1868 if (opt)
1869 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1870 else
1871 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1872 break;
1873
1874 case HCI_TIME_STAMP:
1875 if (get_user(opt, (int __user *)optval)) {
1876 err = -EFAULT;
1877 break;
1878 }
1879
1880 if (opt)
1881 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1882 else
1883 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1884 break;
1885
1886 case HCI_FILTER:
0878b666
MH
1887 {
1888 struct hci_filter *f = &hci_pi(sk)->filter;
1889
1890 uf.type_mask = f->type_mask;
1891 uf.opcode = f->opcode;
1892 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1893 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1894 }
1895
1da177e4
LT
1896 len = min_t(unsigned int, len, sizeof(uf));
1897 if (copy_from_user(&uf, optval, len)) {
1898 err = -EFAULT;
1899 break;
1900 }
1901
1902 if (!capable(CAP_NET_RAW)) {
1903 uf.type_mask &= hci_sec_filter.type_mask;
1904 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1905 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1906 }
1907
1908 {
1909 struct hci_filter *f = &hci_pi(sk)->filter;
1910
1911 f->type_mask = uf.type_mask;
1912 f->opcode = uf.opcode;
1913 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1914 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1915 }
8e87d142 1916 break;
1da177e4
LT
1917
1918 default:
1919 err = -ENOPROTOOPT;
1920 break;
1921 }
1922
2f39cdb7 1923done:
1da177e4
LT
1924 release_sock(sk);
1925 return err;
1926}
1927
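/* Illustrative userspace use of the HCI_FILTER option handled above.
 * This is only a sketch: struct hci_filter and the packet/event
 * constants are assumed to come from the libbluetooth
 * <bluetooth/hci.h> header, which mirrors struct hci_ufilter.
 *
 *   struct hci_filter flt;
 *
 *   memset(&flt, 0, sizeof(flt));
 *   flt.type_mask     = 1 << HCI_EVENT_PKT;
 *   flt.event_mask[0] = 1 << HCI_EV_CMD_COMPLETE;
 *   flt.event_mask[1] = 0;
 *   if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
 *           perror("setsockopt HCI_FILTER");
 *
 * Without CAP_NET_RAW the requested masks are silently reduced to the
 * hci_sec_filter defined near the top of this file.
 */
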
8fc9ced3
GP
1928static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1929 char __user *optval, int __user *optlen)
1da177e4
LT
1930{
1931 struct hci_ufilter uf;
1932 struct sock *sk = sock->sk;
cedc5469
MH
1933 int len, opt, err = 0;
1934
1935 BT_DBG("sk %p, opt %d", sk, optname);
1da177e4 1936
47b0f573
MH
1937 if (level != SOL_HCI)
1938 return -ENOPROTOOPT;
1939
1da177e4
LT
1940 if (get_user(len, optlen))
1941 return -EFAULT;
1942
cedc5469
MH
1943 lock_sock(sk);
1944
1945 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
c2371e80 1946 err = -EBADFD;
cedc5469
MH
1947 goto done;
1948 }
1949
1da177e4
LT
1950 switch (optname) {
1951 case HCI_DATA_DIR:
1952 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1953 opt = 1;
8e87d142 1954 else
1da177e4
LT
1955 opt = 0;
1956
1957 if (put_user(opt, optval))
cedc5469 1958 err = -EFAULT;
1da177e4
LT
1959 break;
1960
1961 case HCI_TIME_STAMP:
1962 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1963 opt = 1;
8e87d142 1964 else
1da177e4
LT
1965 opt = 0;
1966
1967 if (put_user(opt, optval))
cedc5469 1968 err = -EFAULT;
1da177e4
LT
1969 break;
1970
1971 case HCI_FILTER:
1972 {
1973 struct hci_filter *f = &hci_pi(sk)->filter;
1974
e15ca9a0 1975 memset(&uf, 0, sizeof(uf));
1da177e4
LT
1976 uf.type_mask = f->type_mask;
1977 uf.opcode = f->opcode;
1978 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1979 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1980 }
1981
1982 len = min_t(unsigned int, len, sizeof(uf));
1983 if (copy_to_user(optval, &uf, len))
cedc5469 1984 err = -EFAULT;
1da177e4
LT
1985 break;
1986
1987 default:
cedc5469 1988 err = -ENOPROTOOPT;
1da177e4
LT
1989 break;
1990 }
1991
cedc5469
MH
1992done:
1993 release_sock(sk);
1994 return err;
1da177e4
LT
1995}
1996
90ddc4f0 1997static const struct proto_ops hci_sock_ops = {
1da177e4
LT
1998 .family = PF_BLUETOOTH,
1999 .owner = THIS_MODULE,
2000 .release = hci_sock_release,
2001 .bind = hci_sock_bind,
2002 .getname = hci_sock_getname,
2003 .sendmsg = hci_sock_sendmsg,
2004 .recvmsg = hci_sock_recvmsg,
2005 .ioctl = hci_sock_ioctl,
7a6038b3
AB
2006#ifdef CONFIG_COMPAT
2007 .compat_ioctl = hci_sock_compat_ioctl,
2008#endif
a11e1d43 2009 .poll = datagram_poll,
1da177e4
LT
2010 .listen = sock_no_listen,
2011 .shutdown = sock_no_shutdown,
2012 .setsockopt = hci_sock_setsockopt,
2013 .getsockopt = hci_sock_getsockopt,
2014 .connect = sock_no_connect,
2015 .socketpair = sock_no_socketpair,
2016 .accept = sock_no_accept,
2017 .mmap = sock_no_mmap
2018};
2019
2020static struct proto hci_sk_proto = {
2021 .name = "HCI",
2022 .owner = THIS_MODULE,
2023 .obj_size = sizeof(struct hci_pinfo)
2024};
2025
3f378b68
EP
2026static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2027 int kern)
1da177e4
LT
2028{
2029 struct sock *sk;
2030
2031 BT_DBG("sock %p", sock);
2032
2033 if (sock->type != SOCK_RAW)
2034 return -ESOCKTNOSUPPORT;
2035
2036 sock->ops = &hci_sock_ops;
2037
11aa9c28 2038 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1da177e4
LT
2039 if (!sk)
2040 return -ENOMEM;
2041
2042 sock_init_data(sock, sk);
2043
2044 sock_reset_flag(sk, SOCK_ZAPPED);
2045
2046 sk->sk_protocol = protocol;
2047
2048 sock->state = SS_UNCONNECTED;
2049 sk->sk_state = BT_OPEN;
2050
2051 bt_sock_link(&hci_sk_list, sk);
2052 return 0;
2053}
2054
ec1b4cf7 2055static const struct net_proto_family hci_sock_family_ops = {
1da177e4
LT
2056 .family = PF_BLUETOOTH,
2057 .owner = THIS_MODULE,
2058 .create = hci_sock_create,
2059};
2060
1da177e4
LT
2061int __init hci_sock_init(void)
2062{
2063 int err;
2064
b0a8e282
MH
2065 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2066
1da177e4
LT
2067 err = proto_register(&hci_sk_proto, 0);
2068 if (err < 0)
2069 return err;
2070
2071 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
f7c86637
MY
2072 if (err < 0) {
2073 BT_ERR("HCI socket registration failed");
1da177e4 2074 goto error;
f7c86637
MY
2075 }
2076
b0316615 2077 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
f7c86637
MY
2078 if (err < 0) {
2079 BT_ERR("Failed to create HCI proc file");
2080 bt_sock_unregister(BTPROTO_HCI);
2081 goto error;
2082 }
1da177e4 2083
1da177e4
LT
2084 BT_INFO("HCI socket layer initialized");
2085
2086 return 0;
2087
2088error:
1da177e4
LT
2089 proto_unregister(&hci_sk_proto);
2090 return err;
2091}
2092
b7440a14 2093void hci_sock_cleanup(void)
1da177e4 2094{
f7c86637 2095 bt_procfs_cleanup(&init_net, "hci");
5e9d7f86 2096 bt_sock_unregister(BTPROTO_HCI);
1da177e4 2097 proto_unregister(&hci_sk_proto);
1da177e4 2098}