]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - net/qrtr/qrtr.c
net: qrtr: Use sk_buff->cb in receive path
[mirror_ubuntu-hirsute-kernel.git] / net / qrtr / qrtr.c
CommitLineData
bdabad3e
CC
1/*
2 * Copyright (c) 2015, Sony Mobile Communications Inc.
3 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/module.h>
15#include <linux/netlink.h>
16#include <linux/qrtr.h>
17#include <linux/termios.h> /* For TIOCINQ/OUTQ */
18
19#include <net/sock.h>
20
21#include "qrtr.h"
22
23#define QRTR_PROTO_VER 1
24
25/* auto-bind range */
26#define QRTR_MIN_EPH_SOCKET 0x4000
27#define QRTR_MAX_EPH_SOCKET 0x7fff
28
bdabad3e
CC
29/**
30 * struct qrtr_hdr - (I|R)PCrouter packet header
31 * @version: protocol version
32 * @type: packet type; one of QRTR_TYPE_*
33 * @src_node_id: source node
34 * @src_port_id: source port
35 * @confirm_rx: boolean; whether a resume-tx packet should be send in reply
36 * @size: length of packet, excluding this header
37 * @dst_node_id: destination node
38 * @dst_port_id: destination port
39 */
40struct qrtr_hdr {
41 __le32 version;
42 __le32 type;
43 __le32 src_node_id;
44 __le32 src_port_id;
45 __le32 confirm_rx;
46 __le32 size;
47 __le32 dst_node_id;
48 __le32 dst_port_id;
49} __packed;
50
f507a9b6
BA
51struct qrtr_cb {
52 u32 src_node;
53 u32 src_port;
54 u32 dst_node;
55 u32 dst_port;
56
57 u8 type;
58 u8 confirm_rx;
59};
60
bdabad3e 61#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
bdabad3e
CC
62
63struct qrtr_sock {
64 /* WARNING: sk must be the first member */
65 struct sock sk;
66 struct sockaddr_qrtr us;
67 struct sockaddr_qrtr peer;
68};
69
70static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
71{
72 BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
73 return container_of(sk, struct qrtr_sock, sk);
74}
75
/* Node id of this machine; updatable via RTM_NEWADDR netlink. */
static unsigned int qrtr_local_nid = -1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);
88
89/**
90 * struct qrtr_node - endpoint node
91 * @ep_lock: lock for endpoint management and callbacks
92 * @ep: endpoint
93 * @ref: reference count for node
94 * @nid: node id
95 * @rx_queue: receive queue
96 * @work: scheduled work struct for recv work
97 * @item: list item for broadcast list
98 */
99struct qrtr_node {
100 struct mutex ep_lock;
101 struct qrtr_endpoint *ep;
102 struct kref ref;
103 unsigned int nid;
104
105 struct sk_buff_head rx_queue;
106 struct work_struct work;
107 struct list_head item;
108};
109
e7044482
BA
110static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
111 int type, struct sockaddr_qrtr *from,
112 struct sockaddr_qrtr *to);
113static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
114 int type, struct sockaddr_qrtr *from,
115 struct sockaddr_qrtr *to);
8acc8ee4 116
bdabad3e
CC
117/* Release node resources and free the node.
118 *
119 * Do not call directly, use qrtr_node_release. To be used with
120 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
121 */
122static void __qrtr_node_release(struct kref *kref)
123{
124 struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
125
126 if (node->nid != QRTR_EP_NID_AUTO)
127 radix_tree_delete(&qrtr_nodes, node->nid);
128
129 list_del(&node->item);
130 mutex_unlock(&qrtr_node_lock);
131
132 skb_queue_purge(&node->rx_queue);
133 kfree(node);
134}
135
136/* Increment reference to node. */
137static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
138{
139 if (node)
140 kref_get(&node->ref);
141 return node;
142}
143
144/* Decrement reference to node and release as necessary. */
145static void qrtr_node_release(struct qrtr_node *node)
146{
147 if (!node)
148 return;
149 kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
150}
151
152/* Pass an outgoing packet socket buffer to the endpoint driver. */
e7044482
BA
153static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
154 int type, struct sockaddr_qrtr *from,
155 struct sockaddr_qrtr *to)
bdabad3e 156{
e7044482
BA
157 struct qrtr_hdr *hdr;
158 size_t len = skb->len;
bdabad3e
CC
159 int rc = -ENODEV;
160
e7044482
BA
161 hdr = skb_push(skb, QRTR_HDR_SIZE);
162 hdr->version = cpu_to_le32(QRTR_PROTO_VER);
163 hdr->type = cpu_to_le32(type);
164 hdr->src_node_id = cpu_to_le32(from->sq_node);
165 hdr->src_port_id = cpu_to_le32(from->sq_port);
166 hdr->dst_node_id = cpu_to_le32(to->sq_node);
167 hdr->dst_port_id = cpu_to_le32(to->sq_port);
168
169 hdr->size = cpu_to_le32(len);
170 hdr->confirm_rx = 0;
171
172 skb_put_padto(skb, ALIGN(len, 4));
173
bdabad3e
CC
174 mutex_lock(&node->ep_lock);
175 if (node->ep)
176 rc = node->ep->xmit(node->ep, skb);
177 else
178 kfree_skb(skb);
179 mutex_unlock(&node->ep_lock);
180
181 return rc;
182}
183
184/* Lookup node by id.
185 *
186 * callers must release with qrtr_node_release()
187 */
188static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
189{
190 struct qrtr_node *node;
191
192 mutex_lock(&qrtr_node_lock);
193 node = radix_tree_lookup(&qrtr_nodes, nid);
194 node = qrtr_node_acquire(node);
195 mutex_unlock(&qrtr_node_lock);
196
197 return node;
198}
199
200/* Assign node id to node.
201 *
202 * This is mostly useful for automatic node id assignment, based on
203 * the source id in the incoming packet.
204 */
205static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
206{
207 if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
208 return;
209
210 mutex_lock(&qrtr_node_lock);
211 radix_tree_insert(&qrtr_nodes, nid, node);
212 node->nid = nid;
213 mutex_unlock(&qrtr_node_lock);
214}
215
216/**
217 * qrtr_endpoint_post() - post incoming data
218 * @ep: endpoint handle
219 * @data: data pointer
220 * @len: size of data in bytes
221 *
222 * Return: 0 on success; negative error code on failure
223 */
224int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
225{
226 struct qrtr_node *node = ep->node;
227 const struct qrtr_hdr *phdr = data;
228 struct sk_buff *skb;
f507a9b6 229 struct qrtr_cb *cb;
bdabad3e
CC
230 unsigned int psize;
231 unsigned int size;
232 unsigned int type;
233 unsigned int ver;
234 unsigned int dst;
235
236 if (len < QRTR_HDR_SIZE || len & 3)
237 return -EINVAL;
238
239 ver = le32_to_cpu(phdr->version);
240 size = le32_to_cpu(phdr->size);
241 type = le32_to_cpu(phdr->type);
242 dst = le32_to_cpu(phdr->dst_port_id);
243
244 psize = (size + 3) & ~3;
245
246 if (ver != QRTR_PROTO_VER)
247 return -EINVAL;
248
249 if (len != psize + QRTR_HDR_SIZE)
250 return -EINVAL;
251
252 if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
253 return -EINVAL;
254
255 skb = netdev_alloc_skb(NULL, len);
256 if (!skb)
257 return -ENOMEM;
258
f507a9b6
BA
259 cb = (struct qrtr_cb *)skb->cb;
260 cb->src_node = le32_to_cpu(phdr->src_node_id);
261 cb->src_port = le32_to_cpu(phdr->src_port_id);
262 cb->dst_node = le32_to_cpu(phdr->dst_node_id);
263 cb->dst_port = le32_to_cpu(phdr->dst_port_id);
264 cb->type = type;
265 cb->confirm_rx = !!phdr->confirm_rx;
266
267 skb_put_data(skb, data + QRTR_HDR_SIZE, size);
bdabad3e
CC
268
269 skb_queue_tail(&node->rx_queue, skb);
270 schedule_work(&node->work);
271
272 return 0;
273}
274EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
275
1a7959c7
BA
276/**
277 * qrtr_alloc_ctrl_packet() - allocate control packet skb
278 * @pkt: reference to qrtr_ctrl_pkt pointer
279 *
280 * Returns newly allocated sk_buff, or NULL on failure
281 *
282 * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
283 * on success returns a reference to the control packet in @pkt.
284 */
285static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
bdabad3e 286{
1a7959c7 287 const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
bdabad3e 288 struct sk_buff *skb;
bdabad3e
CC
289
290 skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
291 if (!skb)
292 return NULL;
bdabad3e 293
e7044482 294 skb_reserve(skb, QRTR_HDR_SIZE);
1a7959c7 295 *pkt = skb_put_zero(skb, pkt_len);
1784473b
BA
296
297 return skb;
298}
299
bdabad3e
CC
300static struct qrtr_sock *qrtr_port_lookup(int port);
301static void qrtr_port_put(struct qrtr_sock *ipc);
302
303/* Handle and route a received packet.
304 *
305 * This will auto-reply with resume-tx packet as necessary.
306 */
307static void qrtr_node_rx_work(struct work_struct *work)
308{
309 struct qrtr_node *node = container_of(work, struct qrtr_node, work);
1a7959c7 310 struct qrtr_ctrl_pkt *pkt;
e7044482
BA
311 struct sockaddr_qrtr dst;
312 struct sockaddr_qrtr src;
bdabad3e
CC
313 struct sk_buff *skb;
314
315 while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
bdabad3e 316 struct qrtr_sock *ipc;
f507a9b6 317 struct qrtr_cb *cb;
bdabad3e
CC
318 int confirm;
319
f507a9b6
BA
320 cb = (struct qrtr_cb *)skb->cb;
321 src.sq_node = cb->src_node;
322 src.sq_port = cb->src_port;
323 dst.sq_node = cb->dst_node;
324 dst.sq_port = cb->dst_port;
325 confirm = !!cb->confirm_rx;
bdabad3e 326
f507a9b6 327 qrtr_node_assign(node, cb->src_node);
e7044482 328
f507a9b6 329 ipc = qrtr_port_lookup(cb->dst_port);
bdabad3e
CC
330 if (!ipc) {
331 kfree_skb(skb);
332 } else {
333 if (sock_queue_rcv_skb(&ipc->sk, skb))
334 kfree_skb(skb);
335
336 qrtr_port_put(ipc);
337 }
338
339 if (confirm) {
1a7959c7 340 skb = qrtr_alloc_ctrl_packet(&pkt);
bdabad3e
CC
341 if (!skb)
342 break;
e7044482 343
1a7959c7
BA
344 pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
345 pkt->client.node = cpu_to_le32(dst.sq_node);
346 pkt->client.port = cpu_to_le32(dst.sq_port);
347
e7044482
BA
348 if (qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
349 &dst, &src))
bdabad3e
CC
350 break;
351 }
352 }
353}
354
355/**
356 * qrtr_endpoint_register() - register a new endpoint
357 * @ep: endpoint to register
358 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
359 * Return: 0 on success; negative error code on failure
360 *
361 * The specified endpoint must have the xmit function pointer set on call.
362 */
363int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
364{
365 struct qrtr_node *node;
366
367 if (!ep || !ep->xmit)
368 return -EINVAL;
369
370 node = kzalloc(sizeof(*node), GFP_KERNEL);
371 if (!node)
372 return -ENOMEM;
373
374 INIT_WORK(&node->work, qrtr_node_rx_work);
375 kref_init(&node->ref);
376 mutex_init(&node->ep_lock);
377 skb_queue_head_init(&node->rx_queue);
378 node->nid = QRTR_EP_NID_AUTO;
379 node->ep = ep;
380
381 qrtr_node_assign(node, nid);
382
383 mutex_lock(&qrtr_node_lock);
384 list_add(&node->item, &qrtr_all_nodes);
385 mutex_unlock(&qrtr_node_lock);
386 ep->node = node;
387
388 return 0;
389}
390EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
391
392/**
393 * qrtr_endpoint_unregister - unregister endpoint
394 * @ep: endpoint to unregister
395 */
396void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
397{
398 struct qrtr_node *node = ep->node;
e7044482
BA
399 struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
400 struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
1a7959c7 401 struct qrtr_ctrl_pkt *pkt;
8acc8ee4 402 struct sk_buff *skb;
bdabad3e
CC
403
404 mutex_lock(&node->ep_lock);
405 node->ep = NULL;
406 mutex_unlock(&node->ep_lock);
407
8acc8ee4 408 /* Notify the local controller about the event */
1a7959c7
BA
409 skb = qrtr_alloc_ctrl_packet(&pkt);
410 if (skb) {
411 pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
e7044482 412 qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
1a7959c7 413 }
8acc8ee4 414
bdabad3e
CC
415 qrtr_node_release(node);
416 ep->node = NULL;
417}
418EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
419
420/* Lookup socket by port.
421 *
422 * Callers must release with qrtr_port_put()
423 */
424static struct qrtr_sock *qrtr_port_lookup(int port)
425{
426 struct qrtr_sock *ipc;
427
428 if (port == QRTR_PORT_CTRL)
429 port = 0;
430
431 mutex_lock(&qrtr_port_lock);
432 ipc = idr_find(&qrtr_ports, port);
433 if (ipc)
434 sock_hold(&ipc->sk);
435 mutex_unlock(&qrtr_port_lock);
436
437 return ipc;
438}
439
440/* Release acquired socket. */
441static void qrtr_port_put(struct qrtr_sock *ipc)
442{
443 sock_put(&ipc->sk);
444}
445
446/* Remove port assignment. */
447static void qrtr_port_remove(struct qrtr_sock *ipc)
448{
1a7959c7 449 struct qrtr_ctrl_pkt *pkt;
1784473b 450 struct sk_buff *skb;
bdabad3e 451 int port = ipc->us.sq_port;
e7044482
BA
452 struct sockaddr_qrtr to;
453
454 to.sq_family = AF_QIPCRTR;
455 to.sq_node = QRTR_NODE_BCAST;
456 to.sq_port = QRTR_PORT_CTRL;
bdabad3e 457
1a7959c7 458 skb = qrtr_alloc_ctrl_packet(&pkt);
1784473b 459 if (skb) {
1a7959c7
BA
460 pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
461 pkt->client.node = cpu_to_le32(ipc->us.sq_node);
462 pkt->client.port = cpu_to_le32(ipc->us.sq_port);
463
1784473b 464 skb_set_owner_w(skb, &ipc->sk);
e7044482
BA
465 qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
466 &to);
1784473b
BA
467 }
468
bdabad3e
CC
469 if (port == QRTR_PORT_CTRL)
470 port = 0;
471
472 __sock_put(&ipc->sk);
473
474 mutex_lock(&qrtr_port_lock);
475 idr_remove(&qrtr_ports, port);
476 mutex_unlock(&qrtr_port_lock);
477}
478
479/* Assign port number to socket.
480 *
481 * Specify port in the integer pointed to by port, and it will be adjusted
482 * on return as necesssary.
483 *
484 * Port may be:
485 * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
486 * <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
487 * >QRTR_MIN_EPH_SOCKET: Specified; available to all
488 */
489static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
490{
491 int rc;
492
493 mutex_lock(&qrtr_port_lock);
494 if (!*port) {
495 rc = idr_alloc(&qrtr_ports, ipc,
496 QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
497 GFP_ATOMIC);
498 if (rc >= 0)
499 *port = rc;
500 } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
501 rc = -EACCES;
502 } else if (*port == QRTR_PORT_CTRL) {
503 rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
504 } else {
505 rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
506 if (rc >= 0)
507 *port = rc;
508 }
509 mutex_unlock(&qrtr_port_lock);
510
511 if (rc == -ENOSPC)
512 return -EADDRINUSE;
513 else if (rc < 0)
514 return rc;
515
516 sock_hold(&ipc->sk);
517
518 return 0;
519}
520
b24844b1
BA
521/* Reset all non-control ports */
522static void qrtr_reset_ports(void)
523{
524 struct qrtr_sock *ipc;
525 int id;
526
527 mutex_lock(&qrtr_port_lock);
528 idr_for_each_entry(&qrtr_ports, ipc, id) {
529 /* Don't reset control port */
530 if (id == 0)
531 continue;
532
533 sock_hold(&ipc->sk);
534 ipc->sk.sk_err = ENETRESET;
ae85bfa8 535 ipc->sk.sk_error_report(&ipc->sk);
b24844b1
BA
536 sock_put(&ipc->sk);
537 }
538 mutex_unlock(&qrtr_port_lock);
539}
540
bdabad3e
CC
541/* Bind socket to address.
542 *
543 * Socket should be locked upon call.
544 */
545static int __qrtr_bind(struct socket *sock,
546 const struct sockaddr_qrtr *addr, int zapped)
547{
548 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
549 struct sock *sk = sock->sk;
550 int port;
551 int rc;
552
553 /* rebinding ok */
554 if (!zapped && addr->sq_port == ipc->us.sq_port)
555 return 0;
556
557 port = addr->sq_port;
558 rc = qrtr_port_assign(ipc, &port);
559 if (rc)
560 return rc;
561
562 /* unbind previous, if any */
563 if (!zapped)
564 qrtr_port_remove(ipc);
565 ipc->us.sq_port = port;
566
567 sock_reset_flag(sk, SOCK_ZAPPED);
568
b24844b1
BA
569 /* Notify all open ports about the new controller */
570 if (port == QRTR_PORT_CTRL)
571 qrtr_reset_ports();
572
bdabad3e
CC
573 return 0;
574}
575
576/* Auto bind to an ephemeral port. */
577static int qrtr_autobind(struct socket *sock)
578{
579 struct sock *sk = sock->sk;
580 struct sockaddr_qrtr addr;
581
582 if (!sock_flag(sk, SOCK_ZAPPED))
583 return 0;
584
585 addr.sq_family = AF_QIPCRTR;
586 addr.sq_node = qrtr_local_nid;
587 addr.sq_port = 0;
588
589 return __qrtr_bind(sock, &addr, 1);
590}
591
592/* Bind socket to specified sockaddr. */
593static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
594{
595 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
596 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
597 struct sock *sk = sock->sk;
598 int rc;
599
600 if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
601 return -EINVAL;
602
603 if (addr->sq_node != ipc->us.sq_node)
604 return -EINVAL;
605
606 lock_sock(sk);
607 rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
608 release_sock(sk);
609
610 return rc;
611}
612
613/* Queue packet to local peer socket. */
e7044482
BA
614static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
615 int type, struct sockaddr_qrtr *from,
616 struct sockaddr_qrtr *to)
bdabad3e 617{
bdabad3e 618 struct qrtr_sock *ipc;
f507a9b6 619 struct qrtr_cb *cb;
bdabad3e 620
e7044482 621 ipc = qrtr_port_lookup(to->sq_port);
bdabad3e
CC
622 if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
623 kfree_skb(skb);
624 return -ENODEV;
625 }
626
f507a9b6
BA
627 cb = (struct qrtr_cb *)skb->cb;
628 cb->src_node = from->sq_node;
629 cb->src_port = from->sq_port;
e7044482 630
bdabad3e
CC
631 if (sock_queue_rcv_skb(&ipc->sk, skb)) {
632 qrtr_port_put(ipc);
633 kfree_skb(skb);
634 return -ENOSPC;
635 }
636
637 qrtr_port_put(ipc);
638
639 return 0;
640}
641
642/* Queue packet for broadcast. */
e7044482
BA
643static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
644 int type, struct sockaddr_qrtr *from,
645 struct sockaddr_qrtr *to)
bdabad3e
CC
646{
647 struct sk_buff *skbn;
648
649 mutex_lock(&qrtr_node_lock);
650 list_for_each_entry(node, &qrtr_all_nodes, item) {
651 skbn = skb_clone(skb, GFP_KERNEL);
652 if (!skbn)
653 break;
654 skb_set_owner_w(skbn, skb->sk);
e7044482 655 qrtr_node_enqueue(node, skbn, type, from, to);
bdabad3e
CC
656 }
657 mutex_unlock(&qrtr_node_lock);
658
e7044482 659 qrtr_local_enqueue(node, skb, type, from, to);
bdabad3e
CC
660
661 return 0;
662}
663
664static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
665{
666 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
e7044482
BA
667 int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
668 struct sockaddr_qrtr *, struct sockaddr_qrtr *);
bdabad3e
CC
669 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
670 struct sock *sk = sock->sk;
671 struct qrtr_node *node;
bdabad3e
CC
672 struct sk_buff *skb;
673 size_t plen;
e7044482 674 u32 type = QRTR_TYPE_DATA;
bdabad3e
CC
675 int rc;
676
677 if (msg->msg_flags & ~(MSG_DONTWAIT))
678 return -EINVAL;
679
680 if (len > 65535)
681 return -EMSGSIZE;
682
683 lock_sock(sk);
684
685 if (addr) {
686 if (msg->msg_namelen < sizeof(*addr)) {
687 release_sock(sk);
688 return -EINVAL;
689 }
690
691 if (addr->sq_family != AF_QIPCRTR) {
692 release_sock(sk);
693 return -EINVAL;
694 }
695
696 rc = qrtr_autobind(sock);
697 if (rc) {
698 release_sock(sk);
699 return rc;
700 }
701 } else if (sk->sk_state == TCP_ESTABLISHED) {
702 addr = &ipc->peer;
703 } else {
704 release_sock(sk);
705 return -ENOTCONN;
706 }
707
708 node = NULL;
709 if (addr->sq_node == QRTR_NODE_BCAST) {
710 enqueue_fn = qrtr_bcast_enqueue;
711 } else if (addr->sq_node == ipc->us.sq_node) {
712 enqueue_fn = qrtr_local_enqueue;
713 } else {
714 enqueue_fn = qrtr_node_enqueue;
715 node = qrtr_node_lookup(addr->sq_node);
716 if (!node) {
717 release_sock(sk);
718 return -ECONNRESET;
719 }
720 }
721
722 plen = (len + 3) & ~3;
723 skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
724 msg->msg_flags & MSG_DONTWAIT, &rc);
725 if (!skb)
726 goto out_node;
727
e7044482 728 skb_reserve(skb, QRTR_HDR_SIZE);
bdabad3e 729
e7044482 730 rc = memcpy_from_msg(skb_put(skb, len), msg, len);
bdabad3e
CC
731 if (rc) {
732 kfree_skb(skb);
733 goto out_node;
734 }
735
bdabad3e
CC
736 if (ipc->us.sq_port == QRTR_PORT_CTRL) {
737 if (len < 4) {
738 rc = -EINVAL;
739 kfree_skb(skb);
740 goto out_node;
741 }
742
743 /* control messages already require the type as 'command' */
e7044482
BA
744 skb_copy_bits(skb, 0, &type, 4);
745 type = le32_to_cpu(type);
bdabad3e
CC
746 }
747
e7044482 748 rc = enqueue_fn(node, skb, type, &ipc->us, addr);
bdabad3e
CC
749 if (rc >= 0)
750 rc = len;
751
752out_node:
753 qrtr_node_release(node);
754 release_sock(sk);
755
756 return rc;
757}
758
759static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
760 size_t size, int flags)
761{
762 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
bdabad3e
CC
763 struct sock *sk = sock->sk;
764 struct sk_buff *skb;
f507a9b6 765 struct qrtr_cb *cb;
bdabad3e
CC
766 int copied, rc;
767
768 lock_sock(sk);
769
770 if (sock_flag(sk, SOCK_ZAPPED)) {
771 release_sock(sk);
772 return -EADDRNOTAVAIL;
773 }
774
775 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
776 flags & MSG_DONTWAIT, &rc);
777 if (!skb) {
778 release_sock(sk);
779 return rc;
780 }
781
f507a9b6 782 copied = skb->len;
bdabad3e
CC
783 if (copied > size) {
784 copied = size;
785 msg->msg_flags |= MSG_TRUNC;
786 }
787
f507a9b6 788 rc = skb_copy_datagram_msg(skb, 0, msg, copied);
bdabad3e
CC
789 if (rc < 0)
790 goto out;
791 rc = copied;
792
793 if (addr) {
f507a9b6 794 cb = (struct qrtr_cb *)skb->cb;
bdabad3e 795 addr->sq_family = AF_QIPCRTR;
f507a9b6
BA
796 addr->sq_node = cb->src_node;
797 addr->sq_port = cb->src_port;
bdabad3e
CC
798 msg->msg_namelen = sizeof(*addr);
799 }
800
801out:
802 skb_free_datagram(sk, skb);
803 release_sock(sk);
804
805 return rc;
806}
807
808static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
809 int len, int flags)
810{
811 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
812 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
813 struct sock *sk = sock->sk;
814 int rc;
815
816 if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
817 return -EINVAL;
818
819 lock_sock(sk);
820
821 sk->sk_state = TCP_CLOSE;
822 sock->state = SS_UNCONNECTED;
823
824 rc = qrtr_autobind(sock);
825 if (rc) {
826 release_sock(sk);
827 return rc;
828 }
829
830 ipc->peer = *addr;
831 sock->state = SS_CONNECTED;
832 sk->sk_state = TCP_ESTABLISHED;
833
834 release_sock(sk);
835
836 return 0;
837}
838
839static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
840 int *len, int peer)
841{
842 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
843 struct sockaddr_qrtr qaddr;
844 struct sock *sk = sock->sk;
845
846 lock_sock(sk);
847 if (peer) {
848 if (sk->sk_state != TCP_ESTABLISHED) {
849 release_sock(sk);
850 return -ENOTCONN;
851 }
852
853 qaddr = ipc->peer;
854 } else {
855 qaddr = ipc->us;
856 }
857 release_sock(sk);
858
859 *len = sizeof(qaddr);
860 qaddr.sq_family = AF_QIPCRTR;
861
862 memcpy(saddr, &qaddr, sizeof(qaddr));
863
864 return 0;
865}
866
867static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
868{
869 void __user *argp = (void __user *)arg;
870 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
871 struct sock *sk = sock->sk;
872 struct sockaddr_qrtr *sq;
873 struct sk_buff *skb;
874 struct ifreq ifr;
875 long len = 0;
876 int rc = 0;
877
878 lock_sock(sk);
879
880 switch (cmd) {
881 case TIOCOUTQ:
882 len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
883 if (len < 0)
884 len = 0;
885 rc = put_user(len, (int __user *)argp);
886 break;
887 case TIOCINQ:
888 skb = skb_peek(&sk->sk_receive_queue);
889 if (skb)
f507a9b6 890 len = skb->len;
bdabad3e
CC
891 rc = put_user(len, (int __user *)argp);
892 break;
893 case SIOCGIFADDR:
894 if (copy_from_user(&ifr, argp, sizeof(ifr))) {
895 rc = -EFAULT;
896 break;
897 }
898
899 sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
900 *sq = ipc->us;
901 if (copy_to_user(argp, &ifr, sizeof(ifr))) {
902 rc = -EFAULT;
903 break;
904 }
905 break;
906 case SIOCGSTAMP:
907 rc = sock_get_timestamp(sk, argp);
908 break;
909 case SIOCADDRT:
910 case SIOCDELRT:
911 case SIOCSIFADDR:
912 case SIOCGIFDSTADDR:
913 case SIOCSIFDSTADDR:
914 case SIOCGIFBRDADDR:
915 case SIOCSIFBRDADDR:
916 case SIOCGIFNETMASK:
917 case SIOCSIFNETMASK:
918 rc = -EINVAL;
919 break;
920 default:
921 rc = -ENOIOCTLCMD;
922 break;
923 }
924
925 release_sock(sk);
926
927 return rc;
928}
929
930static int qrtr_release(struct socket *sock)
931{
932 struct sock *sk = sock->sk;
933 struct qrtr_sock *ipc;
934
935 if (!sk)
936 return 0;
937
938 lock_sock(sk);
939
940 ipc = qrtr_sk(sk);
941 sk->sk_shutdown = SHUTDOWN_MASK;
942 if (!sock_flag(sk, SOCK_DEAD))
943 sk->sk_state_change(sk);
944
945 sock_set_flag(sk, SOCK_DEAD);
946 sock->sk = NULL;
947
948 if (!sock_flag(sk, SOCK_ZAPPED))
949 qrtr_port_remove(ipc);
950
951 skb_queue_purge(&sk->sk_receive_queue);
952
953 release_sock(sk);
954 sock_put(sk);
955
956 return 0;
957}
958
959static const struct proto_ops qrtr_proto_ops = {
960 .owner = THIS_MODULE,
961 .family = AF_QIPCRTR,
962 .bind = qrtr_bind,
963 .connect = qrtr_connect,
964 .socketpair = sock_no_socketpair,
965 .accept = sock_no_accept,
966 .listen = sock_no_listen,
967 .sendmsg = qrtr_sendmsg,
968 .recvmsg = qrtr_recvmsg,
969 .getname = qrtr_getname,
970 .ioctl = qrtr_ioctl,
971 .poll = datagram_poll,
972 .shutdown = sock_no_shutdown,
973 .setsockopt = sock_no_setsockopt,
974 .getsockopt = sock_no_getsockopt,
975 .release = qrtr_release,
976 .mmap = sock_no_mmap,
977 .sendpage = sock_no_sendpage,
978};
979
980static struct proto qrtr_proto = {
981 .name = "QIPCRTR",
982 .owner = THIS_MODULE,
983 .obj_size = sizeof(struct qrtr_sock),
984};
985
986static int qrtr_create(struct net *net, struct socket *sock,
987 int protocol, int kern)
988{
989 struct qrtr_sock *ipc;
990 struct sock *sk;
991
992 if (sock->type != SOCK_DGRAM)
993 return -EPROTOTYPE;
994
995 sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
996 if (!sk)
997 return -ENOMEM;
998
999 sock_set_flag(sk, SOCK_ZAPPED);
1000
1001 sock_init_data(sock, sk);
1002 sock->ops = &qrtr_proto_ops;
1003
1004 ipc = qrtr_sk(sk);
1005 ipc->us.sq_family = AF_QIPCRTR;
1006 ipc->us.sq_node = qrtr_local_nid;
1007 ipc->us.sq_port = 0;
1008
1009 return 0;
1010}
1011
1012static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
1013 [IFA_LOCAL] = { .type = NLA_U32 },
1014};
1015
c21ef3e3
DA
1016static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1017 struct netlink_ext_ack *extack)
bdabad3e
CC
1018{
1019 struct nlattr *tb[IFA_MAX + 1];
1020 struct ifaddrmsg *ifm;
1021 int rc;
1022
1023 if (!netlink_capable(skb, CAP_NET_ADMIN))
1024 return -EPERM;
1025
1026 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1027 return -EPERM;
1028
1029 ASSERT_RTNL();
1030
c21ef3e3 1031 rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy, extack);
bdabad3e
CC
1032 if (rc < 0)
1033 return rc;
1034
1035 ifm = nlmsg_data(nlh);
1036 if (!tb[IFA_LOCAL])
1037 return -EINVAL;
1038
1039 qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
1040 return 0;
1041}
1042
1043static const struct net_proto_family qrtr_family = {
1044 .owner = THIS_MODULE,
1045 .family = AF_QIPCRTR,
1046 .create = qrtr_create,
1047};
1048
1049static int __init qrtr_proto_init(void)
1050{
1051 int rc;
1052
1053 rc = proto_register(&qrtr_proto, 1);
1054 if (rc)
1055 return rc;
1056
1057 rc = sock_register(&qrtr_family);
1058 if (rc) {
1059 proto_unregister(&qrtr_proto);
1060 return rc;
1061 }
1062
b97bac64 1063 rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
bdabad3e
CC
1064
1065 return 0;
1066}
1067module_init(qrtr_proto_init);
1068
1069static void __exit qrtr_proto_fini(void)
1070{
1071 rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
1072 sock_unregister(qrtr_family.family);
1073 proto_unregister(&qrtr_proto);
1074}
1075module_exit(qrtr_proto_fini);
1076
1077MODULE_DESCRIPTION("Qualcomm IPC-router driver");
1078MODULE_LICENSE("GPL v2");