]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - net/qrtr/qrtr.c
net: qrtr: Migrate nameservice to kernel from userspace
[mirror_ubuntu-hirsute-kernel.git] / net / qrtr / qrtr.c
CommitLineData
97fb5e8d 1// SPDX-License-Identifier: GPL-2.0-only
bdabad3e
CC
2/*
3 * Copyright (c) 2015, Sony Mobile Communications Inc.
4 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
bdabad3e
CC
5 */
6#include <linux/module.h>
7#include <linux/netlink.h>
8#include <linux/qrtr.h>
9#include <linux/termios.h> /* For TIOCINQ/OUTQ */
98fa15f3 10#include <linux/numa.h>
0a7e0d0e 11#include <linux/spinlock.h>
5fdeb0d3 12#include <linux/wait.h>
0c2204a4 13#include <linux/workqueue.h>
bdabad3e
CC
14
15#include <net/sock.h>
16
17#include "qrtr.h"
18
/* Protocol version numbers carried in the first byte of every packet.
 * NOTE: "version 2" of the protocol is encoded as the value 3 on the wire.
 */
#define QRTR_PROTO_VER_1 1
#define QRTR_PROTO_VER_2 3

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff

/**
 * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 *
 * All fields are little endian; the struct is __packed because it maps the
 * on-the-wire layout directly.
 */
struct qrtr_hdr_v1 {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;
47
/**
 * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @flags: bitmask of QRTR_FLAGS_*
 * @optlen: length of optional header data
 * @size: length of packet, excluding this header and optlen
 * @src_node_id: source node
 * @src_port_id: source port
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 *
 * Unlike v1, node/port ids are 16 bit; the confirm-rx request travels in
 * @flags rather than a dedicated field.
 */
struct qrtr_hdr_v2 {
	u8 version;
	u8 type;
	u8 flags;
	u8 optlen;
	__le32 size;
	__le16 src_node_id;
	__le16 src_port_id;
	__le16 dst_node_id;
	__le16 dst_port_id;
};

/* v2 flag: receiver should reply with a QRTR_TYPE_RESUME_TX packet */
#define QRTR_FLAGS_CONFIRM_RX	BIT(0)
73
/**
 * struct qrtr_cb - per-skb control-buffer state, decoded from either header
 * @src_node: source node id (host byte order)
 * @src_port: source port id
 * @dst_node: destination node id
 * @dst_port: destination port id
 * @type: packet type; one of QRTR_TYPE_*
 * @confirm_rx: non-zero if the sender requested a resume-tx confirmation
 *
 * Lives in skb->cb, so it must fit in the 48 bytes available there.
 */
struct qrtr_cb {
	u32 src_node;
	u32 src_port;
	u32 dst_node;
	u32 dst_port;

	u8 type;
	u8 confirm_rx;
};

/* Headroom reserved on tx skbs; large enough for either header version */
#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
				sizeof(struct qrtr_hdr_v2))

struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;	/* local (bound) address */
	struct sockaddr_qrtr peer;	/* connected peer, valid in TCP_ESTABLISHED */
};

/* Downcast a struct sock to its enclosing qrtr socket. */
static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}
99
/* Node id used for locally bound sockets; NUMA_NO_NODE until assigned */
static unsigned int qrtr_local_nid = NUMA_NO_NODE;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
static DEFINE_SPINLOCK(qrtr_nodes_lock);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);

/* NOTE(review): presumably scheduled to start the in-kernel name service;
 * the scheduling/handler code is not in this chunk — confirm against
 * qrtr_proto_init/ns code.
 */
static struct delayed_work qrtr_ns_work;
bdabad3e
CC
/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
 * @qrtr_tx_lock: lock for qrtr_tx_flow inserts
 * @rx_queue: receive queue
 * @item: list item for broadcast list
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct radix_tree_root qrtr_tx_flow;
	struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */

	struct sk_buff_head rx_queue;
	struct list_head item;
};

/**
 * struct qrtr_tx_flow - tx flow control
 * @resume_tx: waiters for a resume tx from the remote
 * @pending: number of waiting senders
 * @tx_failed: indicates that a message with confirm_rx flag was lost
 */
struct qrtr_tx_flow {
	struct wait_queue_head resume_tx;
	int pending;
	int tx_failed;
};

/* Flow-control watermarks: at LOW pending messages the confirm_rx flag is
 * set on the outgoing message; at HIGH the sender blocks until the remote
 * replies with QRTR_TYPE_RESUME_TX (see qrtr_tx_wait()).
 */
#define QRTR_TX_FLOW_HIGH 10
#define QRTR_TX_FLOW_LOW 5
154
/* Forward declarations: enqueue handlers and port-refcount helpers are used
 * above their definitions.
 */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);
bdabad3e
CC
164/* Release node resources and free the node.
165 *
166 * Do not call directly, use qrtr_node_release. To be used with
167 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
168 */
169static void __qrtr_node_release(struct kref *kref)
170{
171 struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
5fdeb0d3 172 struct radix_tree_iter iter;
0a7e0d0e 173 unsigned long flags;
5fdeb0d3 174 void __rcu **slot;
bdabad3e 175
0a7e0d0e 176 spin_lock_irqsave(&qrtr_nodes_lock, flags);
bdabad3e
CC
177 if (node->nid != QRTR_EP_NID_AUTO)
178 radix_tree_delete(&qrtr_nodes, node->nid);
0a7e0d0e 179 spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
bdabad3e
CC
180
181 list_del(&node->item);
182 mutex_unlock(&qrtr_node_lock);
183
184 skb_queue_purge(&node->rx_queue);
5fdeb0d3
BA
185
186 /* Free tx flow counters */
187 radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
188 radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
189 kfree(*slot);
190 }
bdabad3e
CC
191 kfree(node);
192}
193
194/* Increment reference to node. */
195static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
196{
197 if (node)
198 kref_get(&node->ref);
199 return node;
200}
201
202/* Decrement reference to node and release as necessary. */
203static void qrtr_node_release(struct qrtr_node *node)
204{
205 if (!node)
206 return;
207 kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
208}
209
5fdeb0d3
BA
210/**
211 * qrtr_tx_resume() - reset flow control counter
212 * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
213 * @skb: resume_tx packet
214 */
215static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
216{
217 struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)skb->data;
218 u64 remote_node = le32_to_cpu(pkt->client.node);
219 u32 remote_port = le32_to_cpu(pkt->client.port);
220 struct qrtr_tx_flow *flow;
221 unsigned long key;
222
223 key = remote_node << 32 | remote_port;
224
225 rcu_read_lock();
226 flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
227 rcu_read_unlock();
228 if (flow) {
229 spin_lock(&flow->resume_tx.lock);
230 flow->pending = 0;
231 spin_unlock(&flow->resume_tx.lock);
232 wake_up_interruptible_all(&flow->resume_tx);
233 }
234
235 consume_skb(skb);
236}
237
/**
 * qrtr_tx_wait() - flow control for outgoing packets
 * @node: qrtr_node that the packet is to be send to
 * @dest_node: node id of the destination
 * @dest_port: port number of the destination
 * @type: type of message
 *
 * The flow control scheme is based around the low and high "watermarks". When
 * the low watermark is passed the confirm_rx flag is set on the outgoing
 * message, which will trigger the remote to send a control message of the type
 * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit
 * further transmission should be paused.
 *
 * Return: 1 if confirm_rx should be set, 0 otherwise or errno failure
 */
static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
			int type)
{
	unsigned long key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;
	int confirm_rx = 0;
	int ret;

	/* Never set confirm_rx on non-data packets */
	if (type != QRTR_TYPE_DATA)
		return 0;

	/* Look up (or lazily create) the flow for this destination; the
	 * mutex serializes only inserts — lookups elsewhere are RCU.
	 */
	mutex_lock(&node->qrtr_tx_lock);
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (flow) {
			init_waitqueue_head(&flow->resume_tx);
			radix_tree_insert(&node->qrtr_tx_flow, key, flow);
		}
	}
	mutex_unlock(&node->qrtr_tx_lock);

	/* Set confirm_rx if we were unable to find and allocate a flow */
	if (!flow)
		return 1;

	/* Block (interruptibly) until below the high watermark, a previous
	 * confirm_rx-flagged send failed, or the endpoint went away. The
	 * waitqueue's internal lock guards pending/tx_failed.
	 */
	spin_lock_irq(&flow->resume_tx.lock);
	ret = wait_event_interruptible_locked_irq(flow->resume_tx,
						  flow->pending < QRTR_TX_FLOW_HIGH ||
						  flow->tx_failed ||
						  !node->ep);
	if (ret < 0) {
		confirm_rx = ret;		/* interrupted by a signal */
	} else if (!node->ep) {
		confirm_rx = -EPIPE;		/* endpoint unregistered */
	} else if (flow->tx_failed) {
		/* re-request confirmation lost with the dropped message */
		flow->tx_failed = 0;
		confirm_rx = 1;
	} else {
		flow->pending++;
		confirm_rx = flow->pending == QRTR_TX_FLOW_LOW;
	}
	spin_unlock_irq(&flow->resume_tx.lock);

	return confirm_rx;
}
300
301/**
302 * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed
303 * @node: qrtr_node that the packet is to be send to
304 * @dest_node: node id of the destination
305 * @dest_port: port number of the destination
306 *
307 * Signal that the transmission of a message with confirm_rx flag failed. The
308 * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH,
309 * at which point transmission would stall forever waiting for the resume TX
310 * message associated with the dropped confirm_rx message.
311 * Work around this by marking the flow as having a failed transmission and
312 * cause the next transmission attempt to be sent with the confirm_rx.
313 */
314static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
315 int dest_port)
316{
317 unsigned long key = (u64)dest_node << 32 | dest_port;
318 struct qrtr_tx_flow *flow;
319
320 rcu_read_lock();
321 flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
322 rcu_read_unlock();
323 if (flow) {
324 spin_lock_irq(&flow->resume_tx.lock);
325 flow->tx_failed = 1;
326 spin_unlock_irq(&flow->resume_tx.lock);
327 }
328}
329
bdabad3e 330/* Pass an outgoing packet socket buffer to the endpoint driver. */
e7044482
BA
331static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
332 int type, struct sockaddr_qrtr *from,
333 struct sockaddr_qrtr *to)
bdabad3e 334{
194ccc88 335 struct qrtr_hdr_v1 *hdr;
e7044482 336 size_t len = skb->len;
bdabad3e 337 int rc = -ENODEV;
5fdeb0d3
BA
338 int confirm_rx;
339
340 confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
341 if (confirm_rx < 0) {
342 kfree_skb(skb);
343 return confirm_rx;
344 }
bdabad3e 345
194ccc88
BA
346 hdr = skb_push(skb, sizeof(*hdr));
347 hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
e7044482
BA
348 hdr->type = cpu_to_le32(type);
349 hdr->src_node_id = cpu_to_le32(from->sq_node);
350 hdr->src_port_id = cpu_to_le32(from->sq_port);
d27e77a3
AKN
351 if (to->sq_port == QRTR_PORT_CTRL) {
352 hdr->dst_node_id = cpu_to_le32(node->nid);
353 hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
354 } else {
355 hdr->dst_node_id = cpu_to_le32(to->sq_node);
356 hdr->dst_port_id = cpu_to_le32(to->sq_port);
357 }
e7044482
BA
358
359 hdr->size = cpu_to_le32(len);
5fdeb0d3 360 hdr->confirm_rx = !!confirm_rx;
e7044482 361
ce57785b 362 skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
e7044482 363
bdabad3e
CC
364 mutex_lock(&node->ep_lock);
365 if (node->ep)
366 rc = node->ep->xmit(node->ep, skb);
367 else
368 kfree_skb(skb);
369 mutex_unlock(&node->ep_lock);
370
5fdeb0d3
BA
371 /* Need to ensure that a subsequent message carries the otherwise lost
372 * confirm_rx flag if we dropped this one */
373 if (rc && confirm_rx)
374 qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);
375
bdabad3e
CC
376 return rc;
377}
378
379/* Lookup node by id.
380 *
381 * callers must release with qrtr_node_release()
382 */
383static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
384{
385 struct qrtr_node *node;
0a7e0d0e 386 unsigned long flags;
bdabad3e 387
0a7e0d0e 388 spin_lock_irqsave(&qrtr_nodes_lock, flags);
bdabad3e
CC
389 node = radix_tree_lookup(&qrtr_nodes, nid);
390 node = qrtr_node_acquire(node);
0a7e0d0e 391 spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
bdabad3e
CC
392
393 return node;
394}
395
396/* Assign node id to node.
397 *
398 * This is mostly useful for automatic node id assignment, based on
399 * the source id in the incoming packet.
400 */
401static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
402{
0a7e0d0e
BA
403 unsigned long flags;
404
bdabad3e
CC
405 if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
406 return;
407
0a7e0d0e 408 spin_lock_irqsave(&qrtr_nodes_lock, flags);
bdabad3e
CC
409 radix_tree_insert(&qrtr_nodes, nid, node);
410 node->nid = nid;
0a7e0d0e 411 spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
bdabad3e
CC
412}
413
414/**
415 * qrtr_endpoint_post() - post incoming data
416 * @ep: endpoint handle
417 * @data: data pointer
418 * @len: size of data in bytes
419 *
420 * Return: 0 on success; negative error code on failure
421 */
422int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
423{
424 struct qrtr_node *node = ep->node;
194ccc88
BA
425 const struct qrtr_hdr_v1 *v1;
426 const struct qrtr_hdr_v2 *v2;
e04df98a 427 struct qrtr_sock *ipc;
bdabad3e 428 struct sk_buff *skb;
f507a9b6 429 struct qrtr_cb *cb;
bdabad3e 430 unsigned int size;
bdabad3e 431 unsigned int ver;
194ccc88 432 size_t hdrlen;
bdabad3e 433
194ccc88 434 if (len & 3)
bdabad3e
CC
435 return -EINVAL;
436
437 skb = netdev_alloc_skb(NULL, len);
438 if (!skb)
439 return -ENOMEM;
440
f507a9b6 441 cb = (struct qrtr_cb *)skb->cb;
f507a9b6 442
194ccc88
BA
443 /* Version field in v1 is little endian, so this works for both cases */
444 ver = *(u8*)data;
445
446 switch (ver) {
447 case QRTR_PROTO_VER_1:
448 v1 = data;
449 hdrlen = sizeof(*v1);
450
451 cb->type = le32_to_cpu(v1->type);
452 cb->src_node = le32_to_cpu(v1->src_node_id);
453 cb->src_port = le32_to_cpu(v1->src_port_id);
454 cb->confirm_rx = !!v1->confirm_rx;
455 cb->dst_node = le32_to_cpu(v1->dst_node_id);
456 cb->dst_port = le32_to_cpu(v1->dst_port_id);
457
458 size = le32_to_cpu(v1->size);
459 break;
460 case QRTR_PROTO_VER_2:
461 v2 = data;
462 hdrlen = sizeof(*v2) + v2->optlen;
463
464 cb->type = v2->type;
465 cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
466 cb->src_node = le16_to_cpu(v2->src_node_id);
467 cb->src_port = le16_to_cpu(v2->src_port_id);
468 cb->dst_node = le16_to_cpu(v2->dst_node_id);
469 cb->dst_port = le16_to_cpu(v2->dst_port_id);
470
471 if (cb->src_port == (u16)QRTR_PORT_CTRL)
472 cb->src_port = QRTR_PORT_CTRL;
473 if (cb->dst_port == (u16)QRTR_PORT_CTRL)
474 cb->dst_port = QRTR_PORT_CTRL;
475
476 size = le32_to_cpu(v2->size);
477 break;
478 default:
479 pr_err("qrtr: Invalid version %d\n", ver);
480 goto err;
481 }
482
483 if (len != ALIGN(size, 4) + hdrlen)
484 goto err;
485
5fdeb0d3
BA
486 if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
487 cb->type != QRTR_TYPE_RESUME_TX)
194ccc88
BA
488 goto err;
489
490 skb_put_data(skb, data + hdrlen, size);
bdabad3e 491
e04df98a
BA
492 qrtr_node_assign(node, cb->src_node);
493
494 if (cb->type == QRTR_TYPE_RESUME_TX) {
495 qrtr_tx_resume(node, skb);
496 } else {
497 ipc = qrtr_port_lookup(cb->dst_port);
498 if (!ipc)
499 goto err;
500
501 if (sock_queue_rcv_skb(&ipc->sk, skb))
502 goto err;
503
504 qrtr_port_put(ipc);
505 }
bdabad3e
CC
506
507 return 0;
194ccc88
BA
508
509err:
510 kfree_skb(skb);
511 return -EINVAL;
512
bdabad3e
CC
513}
514EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
515
1a7959c7
BA
516/**
517 * qrtr_alloc_ctrl_packet() - allocate control packet skb
518 * @pkt: reference to qrtr_ctrl_pkt pointer
519 *
520 * Returns newly allocated sk_buff, or NULL on failure
521 *
522 * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
523 * on success returns a reference to the control packet in @pkt.
524 */
525static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
bdabad3e 526{
1a7959c7 527 const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
bdabad3e 528 struct sk_buff *skb;
bdabad3e 529
194ccc88 530 skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL);
bdabad3e
CC
531 if (!skb)
532 return NULL;
bdabad3e 533
194ccc88 534 skb_reserve(skb, QRTR_HDR_MAX_SIZE);
1a7959c7 535 *pkt = skb_put_zero(skb, pkt_len);
1784473b
BA
536
537 return skb;
538}
539
bdabad3e
CC
540/**
541 * qrtr_endpoint_register() - register a new endpoint
542 * @ep: endpoint to register
543 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
544 * Return: 0 on success; negative error code on failure
545 *
546 * The specified endpoint must have the xmit function pointer set on call.
547 */
548int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
549{
550 struct qrtr_node *node;
551
552 if (!ep || !ep->xmit)
553 return -EINVAL;
554
555 node = kzalloc(sizeof(*node), GFP_KERNEL);
556 if (!node)
557 return -ENOMEM;
558
bdabad3e
CC
559 kref_init(&node->ref);
560 mutex_init(&node->ep_lock);
561 skb_queue_head_init(&node->rx_queue);
562 node->nid = QRTR_EP_NID_AUTO;
563 node->ep = ep;
564
5fdeb0d3
BA
565 INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
566 mutex_init(&node->qrtr_tx_lock);
567
bdabad3e
CC
568 qrtr_node_assign(node, nid);
569
570 mutex_lock(&qrtr_node_lock);
571 list_add(&node->item, &qrtr_all_nodes);
572 mutex_unlock(&qrtr_node_lock);
573 ep->node = node;
574
575 return 0;
576}
577EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
578
/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 *
 * Detaches the node from the endpoint, notifies the local controller with a
 * BYE message, unblocks all senders waiting on tx flow control, and drops
 * the endpoint's node reference.
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;
	struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
	struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
	struct radix_tree_iter iter;
	struct qrtr_ctrl_pkt *pkt;
	struct qrtr_tx_flow *flow;
	struct sk_buff *skb;
	void __rcu **slot;

	/* Clear node->ep first so qrtr_tx_wait() waiters see the endpoint
	 * disappear and bail out with -EPIPE.
	 */
	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	/* Notify the local controller about the event */
	skb = qrtr_alloc_ctrl_packet(&pkt);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
		qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
	}

	/* Wake up any transmitters waiting for resume-tx from the node */
	mutex_lock(&node->qrtr_tx_lock);
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		wake_up_interruptible_all(&flow->resume_tx);
	}
	mutex_unlock(&node->qrtr_tx_lock);

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
617
618/* Lookup socket by port.
619 *
620 * Callers must release with qrtr_port_put()
621 */
622static struct qrtr_sock *qrtr_port_lookup(int port)
623{
624 struct qrtr_sock *ipc;
625
626 if (port == QRTR_PORT_CTRL)
627 port = 0;
628
f16a4b26 629 rcu_read_lock();
bdabad3e
CC
630 ipc = idr_find(&qrtr_ports, port);
631 if (ipc)
632 sock_hold(&ipc->sk);
f16a4b26 633 rcu_read_unlock();
bdabad3e
CC
634
635 return ipc;
636}
637
638/* Release acquired socket. */
639static void qrtr_port_put(struct qrtr_sock *ipc)
640{
641 sock_put(&ipc->sk);
642}
643
/* Remove port assignment.
 *
 * Broadcasts a DEL_CLIENT notification for the port, removes it from the
 * port IDR and waits out any concurrent qrtr_port_lookup() before returning.
 */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	struct qrtr_ctrl_pkt *pkt;
	struct sk_buff *skb;
	int port = ipc->us.sq_port;
	struct sockaddr_qrtr to;

	to.sq_family = AF_QIPCRTR;
	to.sq_node = QRTR_NODE_BCAST;
	to.sq_port = QRTR_PORT_CTRL;

	/* Best effort: a failed allocation only loses the notification */
	skb = qrtr_alloc_ctrl_packet(&pkt);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
		pkt->client.port = cpu_to_le32(ipc->us.sq_port);

		skb_set_owner_w(skb, &ipc->sk);
		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
				   &to);
	}

	/* The control port lives at IDR index 0 */
	if (port == QRTR_PORT_CTRL)
		port = 0;

	/* Drop the reference taken in qrtr_port_assign() */
	__sock_put(&ipc->sk);

	mutex_lock(&qrtr_port_lock);
	idr_remove(&qrtr_ports, port);
	mutex_unlock(&qrtr_port_lock);

	/* Ensure that if qrtr_port_lookup() did enter the RCU read section we
	 * wait for it to up increment the refcount */
	synchronize_rcu();
}
680
681/* Assign port number to socket.
682 *
683 * Specify port in the integer pointed to by port, and it will be adjusted
684 * on return as necesssary.
685 *
686 * Port may be:
687 * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
688 * <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
689 * >QRTR_MIN_EPH_SOCKET: Specified; available to all
690 */
691static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
692{
693 int rc;
694
695 mutex_lock(&qrtr_port_lock);
696 if (!*port) {
697 rc = idr_alloc(&qrtr_ports, ipc,
698 QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
699 GFP_ATOMIC);
700 if (rc >= 0)
701 *port = rc;
702 } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
703 rc = -EACCES;
704 } else if (*port == QRTR_PORT_CTRL) {
705 rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
706 } else {
707 rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
708 if (rc >= 0)
709 *port = rc;
710 }
711 mutex_unlock(&qrtr_port_lock);
712
713 if (rc == -ENOSPC)
714 return -EADDRINUSE;
715 else if (rc < 0)
716 return rc;
717
718 sock_hold(&ipc->sk);
719
720 return 0;
721}
722
b24844b1
BA
723/* Reset all non-control ports */
724static void qrtr_reset_ports(void)
725{
726 struct qrtr_sock *ipc;
727 int id;
728
729 mutex_lock(&qrtr_port_lock);
730 idr_for_each_entry(&qrtr_ports, ipc, id) {
731 /* Don't reset control port */
732 if (id == 0)
733 continue;
734
735 sock_hold(&ipc->sk);
736 ipc->sk.sk_err = ENETRESET;
ae85bfa8 737 ipc->sk.sk_error_report(&ipc->sk);
b24844b1
BA
738 sock_put(&ipc->sk);
739 }
740 mutex_unlock(&qrtr_port_lock);
741}
742
bdabad3e
CC
743/* Bind socket to address.
744 *
745 * Socket should be locked upon call.
746 */
747static int __qrtr_bind(struct socket *sock,
748 const struct sockaddr_qrtr *addr, int zapped)
749{
750 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
751 struct sock *sk = sock->sk;
752 int port;
753 int rc;
754
755 /* rebinding ok */
756 if (!zapped && addr->sq_port == ipc->us.sq_port)
757 return 0;
758
759 port = addr->sq_port;
760 rc = qrtr_port_assign(ipc, &port);
761 if (rc)
762 return rc;
763
764 /* unbind previous, if any */
765 if (!zapped)
766 qrtr_port_remove(ipc);
767 ipc->us.sq_port = port;
768
769 sock_reset_flag(sk, SOCK_ZAPPED);
770
b24844b1
BA
771 /* Notify all open ports about the new controller */
772 if (port == QRTR_PORT_CTRL)
773 qrtr_reset_ports();
774
bdabad3e
CC
775 return 0;
776}
777
778/* Auto bind to an ephemeral port. */
779static int qrtr_autobind(struct socket *sock)
780{
781 struct sock *sk = sock->sk;
782 struct sockaddr_qrtr addr;
783
784 if (!sock_flag(sk, SOCK_ZAPPED))
785 return 0;
786
787 addr.sq_family = AF_QIPCRTR;
788 addr.sq_node = qrtr_local_nid;
789 addr.sq_port = 0;
790
791 return __qrtr_bind(sock, &addr, 1);
792}
793
794/* Bind socket to specified sockaddr. */
795static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
796{
797 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
798 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
799 struct sock *sk = sock->sk;
800 int rc;
801
802 if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
803 return -EINVAL;
804
805 if (addr->sq_node != ipc->us.sq_node)
806 return -EINVAL;
807
808 lock_sock(sk);
809 rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
810 release_sock(sk);
811
812 return rc;
813}
814
815/* Queue packet to local peer socket. */
e7044482
BA
816static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
817 int type, struct sockaddr_qrtr *from,
818 struct sockaddr_qrtr *to)
bdabad3e 819{
bdabad3e 820 struct qrtr_sock *ipc;
f507a9b6 821 struct qrtr_cb *cb;
bdabad3e 822
e7044482 823 ipc = qrtr_port_lookup(to->sq_port);
bdabad3e
CC
824 if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
825 kfree_skb(skb);
826 return -ENODEV;
827 }
828
f507a9b6
BA
829 cb = (struct qrtr_cb *)skb->cb;
830 cb->src_node = from->sq_node;
831 cb->src_port = from->sq_port;
e7044482 832
bdabad3e
CC
833 if (sock_queue_rcv_skb(&ipc->sk, skb)) {
834 qrtr_port_put(ipc);
835 kfree_skb(skb);
836 return -ENOSPC;
837 }
838
839 qrtr_port_put(ipc);
840
841 return 0;
842}
843
844/* Queue packet for broadcast. */
e7044482
BA
845static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
846 int type, struct sockaddr_qrtr *from,
847 struct sockaddr_qrtr *to)
bdabad3e
CC
848{
849 struct sk_buff *skbn;
850
851 mutex_lock(&qrtr_node_lock);
852 list_for_each_entry(node, &qrtr_all_nodes, item) {
853 skbn = skb_clone(skb, GFP_KERNEL);
854 if (!skbn)
855 break;
856 skb_set_owner_w(skbn, skb->sk);
e7044482 857 qrtr_node_enqueue(node, skbn, type, from, to);
bdabad3e
CC
858 }
859 mutex_unlock(&qrtr_node_lock);
860
e7044482 861 qrtr_local_enqueue(node, skb, type, from, to);
bdabad3e
CC
862
863 return 0;
864}
865
/* Send a datagram: validate the destination, pick the enqueue strategy
 * (local, broadcast or per-node), copy the payload and hand it off.
 * Returns the number of payload bytes sent, or a negative errno.
 */
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
			  struct sockaddr_qrtr *, struct sockaddr_qrtr *);
	__le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct sk_buff *skb;
	size_t plen;
	u32 type;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	/* payload length travels in a 32-bit header field; cap it */
	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		/* no explicit destination: fall back to the connected peer */
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	/* Choose delivery path based on destination node */
	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		enqueue_fn = qrtr_bcast_enqueue;
		/* only the control port may broadcast */
		if (addr->sq_port != QRTR_PORT_CTRL) {
			release_sock(sk);
			return -ENOTCONN;
		}
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		enqueue_fn = qrtr_node_enqueue;
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
	}

	/* round payload up to a 4-byte boundary for the wire format */
	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);

	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, 0, &qrtr_type, 4);
	}

	type = le32_to_cpu(qrtr_type);
	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
965
cb6530b9
BA
966static int qrtr_send_resume_tx(struct qrtr_cb *cb)
967{
968 struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
969 struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
970 struct qrtr_ctrl_pkt *pkt;
971 struct qrtr_node *node;
972 struct sk_buff *skb;
973 int ret;
974
975 node = qrtr_node_lookup(remote.sq_node);
976 if (!node)
977 return -EINVAL;
978
979 skb = qrtr_alloc_ctrl_packet(&pkt);
980 if (!skb)
981 return -ENOMEM;
982
983 pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
984 pkt->client.node = cpu_to_le32(cb->dst_node);
985 pkt->client.port = cpu_to_le32(cb->dst_port);
986
987 ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote);
988
989 qrtr_node_release(node);
990
991 return ret;
992}
993
bdabad3e
CC
994static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
995 size_t size, int flags)
996{
997 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
bdabad3e
CC
998 struct sock *sk = sock->sk;
999 struct sk_buff *skb;
f507a9b6 1000 struct qrtr_cb *cb;
bdabad3e
CC
1001 int copied, rc;
1002
1003 lock_sock(sk);
1004
1005 if (sock_flag(sk, SOCK_ZAPPED)) {
1006 release_sock(sk);
1007 return -EADDRNOTAVAIL;
1008 }
1009
1010 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1011 flags & MSG_DONTWAIT, &rc);
1012 if (!skb) {
1013 release_sock(sk);
1014 return rc;
1015 }
cb6530b9 1016 cb = (struct qrtr_cb *)skb->cb;
bdabad3e 1017
f507a9b6 1018 copied = skb->len;
bdabad3e
CC
1019 if (copied > size) {
1020 copied = size;
1021 msg->msg_flags |= MSG_TRUNC;
1022 }
1023
f507a9b6 1024 rc = skb_copy_datagram_msg(skb, 0, msg, copied);
bdabad3e
CC
1025 if (rc < 0)
1026 goto out;
1027 rc = copied;
1028
1029 if (addr) {
1030 addr->sq_family = AF_QIPCRTR;
f507a9b6
BA
1031 addr->sq_node = cb->src_node;
1032 addr->sq_port = cb->src_port;
bdabad3e
CC
1033 msg->msg_namelen = sizeof(*addr);
1034 }
1035
1036out:
cb6530b9
BA
1037 if (cb->confirm_rx)
1038 qrtr_send_resume_tx(cb);
1039
bdabad3e
CC
1040 skb_free_datagram(sk, skb);
1041 release_sock(sk);
1042
1043 return rc;
1044}
1045
1046static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
1047 int len, int flags)
1048{
1049 DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
1050 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
1051 struct sock *sk = sock->sk;
1052 int rc;
1053
1054 if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
1055 return -EINVAL;
1056
1057 lock_sock(sk);
1058
1059 sk->sk_state = TCP_CLOSE;
1060 sock->state = SS_UNCONNECTED;
1061
1062 rc = qrtr_autobind(sock);
1063 if (rc) {
1064 release_sock(sk);
1065 return rc;
1066 }
1067
1068 ipc->peer = *addr;
1069 sock->state = SS_CONNECTED;
1070 sk->sk_state = TCP_ESTABLISHED;
1071
1072 release_sock(sk);
1073
1074 return 0;
1075}
1076
1077static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
9b2c45d4 1078 int peer)
bdabad3e
CC
1079{
1080 struct qrtr_sock *ipc = qrtr_sk(sock->sk);
1081 struct sockaddr_qrtr qaddr;
1082 struct sock *sk = sock->sk;
1083
1084 lock_sock(sk);
1085 if (peer) {
1086 if (sk->sk_state != TCP_ESTABLISHED) {
1087 release_sock(sk);
1088 return -ENOTCONN;
1089 }
1090
1091 qaddr = ipc->peer;
1092 } else {
1093 qaddr = ipc->us;
1094 }
1095 release_sock(sk);
1096
bdabad3e
CC
1097 qaddr.sq_family = AF_QIPCRTR;
1098
1099 memcpy(saddr, &qaddr, sizeof(qaddr));
1100
9b2c45d4 1101 return sizeof(qaddr);
bdabad3e
CC
1102}
1103
/* Socket ioctl handler: queue-length queries (TIOCOUTQ/TIOCINQ), local
 * address query (SIOCGIFADDR); routing/interface ioctls are rejected.
 */
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		/* bytes of send-buffer space still available */
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		/* size of the next pending datagram (0 if none) */
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		/* not meaningful for qrtr sockets */
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}
1163
/* Final close of a socket: mark it dead, drop its port and queued skbs,
 * and release the sock reference held by the struct socket.
 */
static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	/* Wake any waiters before the socket is flagged dead. */
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;

	/* SOCK_ZAPPED means no port was ever bound, so none to remove. */
	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}
1192
/* Datagram socket operations for AF_QIPCRTR; connection-oriented entry
 * points (accept/listen/socketpair/shutdown and sockopts) are stubbed
 * with the generic sock_no_* rejections.
 */
static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.gettstamp	= sock_gettstamp,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
1214
/* Protocol descriptor; obj_size lets sk_alloc() carve out a qrtr_sock. */
static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};
1220
1221static int qrtr_create(struct net *net, struct socket *sock,
1222 int protocol, int kern)
1223{
1224 struct qrtr_sock *ipc;
1225 struct sock *sk;
1226
1227 if (sock->type != SOCK_DGRAM)
1228 return -EPROTOTYPE;
1229
1230 sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
1231 if (!sk)
1232 return -ENOMEM;
1233
1234 sock_set_flag(sk, SOCK_ZAPPED);
1235
1236 sock_init_data(sock, sk);
1237 sock->ops = &qrtr_proto_ops;
1238
1239 ipc = qrtr_sk(sk);
1240 ipc->us.sq_family = AF_QIPCRTR;
1241 ipc->us.sq_node = qrtr_local_nid;
1242 ipc->us.sq_port = 0;
1243
1244 return 0;
1245}
1246
bdabad3e
CC
/* AF_QIPCRTR family registration; socket(2) lands in qrtr_create(). */
static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};
1252
1253static int __init qrtr_proto_init(void)
1254{
1255 int rc;
1256
1257 rc = proto_register(&qrtr_proto, 1);
1258 if (rc)
1259 return rc;
1260
1261 rc = sock_register(&qrtr_family);
1262 if (rc) {
1263 proto_unregister(&qrtr_proto);
1264 return rc;
1265 }
1266
0c2204a4
MS
1267 /* FIXME: Currently, this 2s delay is required to catch the NEW_SERVER
1268 * messages from routers. But the fix could be somewhere else.
1269 */
1270 INIT_DELAYED_WORK(&qrtr_ns_work, qrtr_ns_init);
1271 schedule_delayed_work(&qrtr_ns_work, msecs_to_jiffies(2000));
bdabad3e 1272
c1c502b5 1273 return rc;
bdabad3e 1274}
b7e732fa 1275postcore_initcall(qrtr_proto_init);
bdabad3e
CC
1276
/* Module teardown: stop the pending nameservice work before removing the
 * nameservice itself, then unregister the family and protocol.
 */
static void __exit qrtr_proto_fini(void)
{
	cancel_delayed_work_sync(&qrtr_ns_work);
	qrtr_ns_remove();
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);
1285
/* Module metadata; the netproto alias lets socket(2) autoload us. */
MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);