]>
Commit | Line | Data |
---|---|---|
bdabad3e CC |
1 | /* |
2 | * Copyright (c) 2015, Sony Mobile Communications Inc. | |
3 | * Copyright (c) 2013, The Linux Foundation. All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 and | |
7 | * only version 2 as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | */ | |
14 | #include <linux/module.h> | |
15 | #include <linux/netlink.h> | |
16 | #include <linux/qrtr.h> | |
17 | #include <linux/termios.h> /* For TIOCINQ/OUTQ */ | |
18 | ||
19 | #include <net/sock.h> | |
20 | ||
21 | #include "qrtr.h" | |
22 | ||
/* Protocol version carried in every qrtr_hdr */
#define QRTR_PROTO_VER 1

/* auto-bind range for ephemeral (unnamed) local ports */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff

/* On-the-wire packet types; carried in qrtr_hdr.type */
enum qrtr_pkt_type {
	QRTR_TYPE_DATA		= 1,	/* user payload */
	QRTR_TYPE_HELLO		= 2,
	QRTR_TYPE_BYE		= 3,	/* remote endpoint went away */
	QRTR_TYPE_NEW_SERVER	= 4,
	QRTR_TYPE_DEL_SERVER	= 5,
	QRTR_TYPE_DEL_CLIENT	= 6,
	QRTR_TYPE_RESUME_TX	= 7,	/* flow-control ack, see confirm_rx */
	QRTR_TYPE_EXIT		= 8,
	QRTR_TYPE_PING		= 9,
};
40 | ||
/**
 * struct qrtr_hdr - (I|R)PCrouter packet header
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be send in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 *
 * All fields are little-endian on the wire; the struct is packed so it can
 * be overlaid directly on received buffers.
 */
struct qrtr_hdr {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;

#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
/* special addresses: broadcast node and the control port */
#define QRTR_NODE_BCAST ((unsigned int)-1)
#define QRTR_PORT_CTRL ((unsigned int)-2)

/* Per-socket state; embeds the struct sock so qrtr_sk() can downcast. */
struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;	/* our bound address */
	struct sockaddr_qrtr peer;	/* connected peer (valid when TCP_ESTABLISHED) */
};
73 | ||
/* Downcast a struct sock to its containing qrtr_sock. */
static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}
79 | ||
/* Node id of this machine; settable via RTM_NEWADDR (qrtr_addr_doit) */
static unsigned int qrtr_local_nid = -1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);
92 | ||
/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint; NULL once the endpoint has been unregistered
 * @ref: reference count for node
 * @nid: node id; QRTR_EP_NID_AUTO until assigned
 * @rx_queue: receive queue of raw packets posted by the endpoint
 * @work: scheduled work struct for recv work (qrtr_node_rx_work)
 * @item: list item for broadcast list (qrtr_all_nodes)
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct sk_buff_head rx_queue;
	struct work_struct work;
	struct list_head item;
};
113 | ||
8acc8ee4 BA |
114 | static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb); |
115 | ||
bdabad3e CC |
/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

	/* Auto-nid nodes were never inserted into the radix tree */
	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);

	list_del(&node->item);
	/* drops the lock taken by kref_put_mutex() in qrtr_node_release() */
	mutex_unlock(&qrtr_node_lock);

	skb_queue_purge(&node->rx_queue);
	kfree(node);
}
134 | ||
135 | /* Increment reference to node. */ | |
136 | static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node) | |
137 | { | |
138 | if (node) | |
139 | kref_get(&node->ref); | |
140 | return node; | |
141 | } | |
142 | ||
/* Decrement reference to node and release as necessary.
 *
 * On the final put, kref_put_mutex() acquires qrtr_node_lock and calls
 * __qrtr_node_release() with it held; the callback unlocks it.
 */
static void qrtr_node_release(struct qrtr_node *node)
{
	if (!node)
		return;
	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}
150 | ||
/* Pass an outgoing packet socket buffer to the endpoint driver.
 *
 * Consumes @skb in all cases: ownership passes to the endpoint's xmit
 * callback, or the skb is freed here if the endpoint is gone.
 */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	int rc = -ENODEV;

	mutex_lock(&node->ep_lock);
	if (node->ep)
		rc = node->ep->xmit(node->ep, skb);
	else
		kfree_skb(skb);
	mutex_unlock(&node->ep_lock);

	return rc;
}
165 | ||
/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 *
 * Returns NULL if no node with @nid exists.
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
	struct qrtr_node *node;

	mutex_lock(&qrtr_node_lock);
	node = radix_tree_lookup(&qrtr_nodes, nid);
	node = qrtr_node_acquire(node);	/* NULL-safe */
	mutex_unlock(&qrtr_node_lock);

	return node;
}
181 | ||
/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 *
 * No-op if the node already has an id or @nid is the AUTO sentinel.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
		return;

	mutex_lock(&qrtr_node_lock);
	/* NOTE(review): radix_tree_insert() failure (-ENOMEM) is ignored,
	 * leaving nid set but the node unreachable via qrtr_node_lookup();
	 * release is still safe since radix_tree_delete() of a missing key
	 * is a no-op — confirm this is acceptable.
	 */
	radix_tree_insert(&qrtr_nodes, nid, node);
	node->nid = nid;
	mutex_unlock(&qrtr_node_lock);
}
197 | ||
/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Validates one raw packet against the wire format, copies it into a fresh
 * skb on the node's rx_queue and schedules the rx worker.
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
	struct qrtr_node *node = ep->node;
	const struct qrtr_hdr *phdr = data;
	struct sk_buff *skb;
	unsigned int psize;
	unsigned int size;
	unsigned int type;
	unsigned int ver;
	unsigned int dst;

	/* must hold a full header and be 4-byte aligned overall */
	if (len < QRTR_HDR_SIZE || len & 3)
		return -EINVAL;

	ver = le32_to_cpu(phdr->version);
	size = le32_to_cpu(phdr->size);
	type = le32_to_cpu(phdr->type);
	dst = le32_to_cpu(phdr->dst_port_id);

	/* payload is padded up to the next 4-byte boundary on the wire */
	psize = (size + 3) & ~3;

	if (ver != QRTR_PROTO_VER)
		return -EINVAL;

	if (len != psize + QRTR_HDR_SIZE)
		return -EINVAL;

	/* only DATA packets may target a regular port; all control types
	 * must be addressed to QRTR_PORT_CTRL
	 */
	if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
		return -EINVAL;

	skb = netdev_alloc_skb(NULL, len);
	if (!skb)
		return -ENOMEM;

	/* header stays in the skb; rx work reads it via skb_transport_header */
	skb_reset_transport_header(skb);
	memcpy(skb_put(skb, len), data, len);

	skb_queue_tail(&node->rx_queue, skb);
	schedule_work(&node->work);

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
249 | ||
/* Allocate an skb and fill in a control-message header.
 *
 * @type: QRTR_TYPE_* value for the header
 * @pkt_len: payload length, excluding the header; caller appends it
 * @src_node/@dst_node: node addressing; ports are both QRTR_PORT_CTRL
 *
 * Returns the skb with the header written and transport header set, or
 * NULL on allocation failure.  The payload area is reserved but not put.
 */
static struct sk_buff *qrtr_alloc_ctrl_packet(u32 type, size_t pkt_len,
					      u32 src_node, u32 dst_node)
{
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;

	skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_reset_transport_header(skb);

	hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->type = cpu_to_le32(type);
	hdr->src_node_id = cpu_to_le32(src_node);
	hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(pkt_len);
	hdr->dst_node_id = cpu_to_le32(dst_node);
	hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);

	return skb;
}
273 | ||
274 | /* Allocate and construct a resume-tx packet. */ | |
275 | static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node, | |
276 | u32 dst_node, u32 port) | |
277 | { | |
278 | const int pkt_len = 20; | |
279 | struct sk_buff *skb; | |
280 | __le32 *buf; | |
281 | ||
282 | skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_RESUME_TX, pkt_len, | |
283 | src_node, dst_node); | |
284 | if (!skb) | |
285 | return NULL; | |
286 | ||
3512a1ad | 287 | buf = (__le32 *)skb_put(skb, pkt_len); |
bdabad3e CC |
288 | memset(buf, 0, pkt_len); |
289 | buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX); | |
290 | buf[1] = cpu_to_le32(src_node); | |
291 | buf[2] = cpu_to_le32(port); | |
292 | ||
293 | return skb; | |
294 | } | |
295 | ||
/* Allocate and construct a BYE message to signal remote termination.
 *
 * Addressed to the local node (qrtr_local_nid) so the local controller
 * learns that the endpoint identified by @src_node has gone away.
 */
static struct sk_buff *qrtr_alloc_local_bye(u32 src_node)
{
	const int pkt_len = 20;
	struct sk_buff *skb;
	__le32 *buf;

	skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_BYE, pkt_len,
				     src_node, qrtr_local_nid);
	if (!skb)
		return NULL;

	buf = (__le32 *)skb_put(skb, pkt_len);
	memset(buf, 0, pkt_len);
	buf[0] = cpu_to_le32(QRTR_TYPE_BYE);

	return skb;
}
314 | ||
bdabad3e CC |
315 | static struct qrtr_sock *qrtr_port_lookup(int port); |
316 | static void qrtr_port_put(struct qrtr_sock *ipc); | |
317 | ||
318 | /* Handle and route a received packet. | |
319 | * | |
320 | * This will auto-reply with resume-tx packet as necessary. | |
321 | */ | |
322 | static void qrtr_node_rx_work(struct work_struct *work) | |
323 | { | |
324 | struct qrtr_node *node = container_of(work, struct qrtr_node, work); | |
325 | struct sk_buff *skb; | |
326 | ||
327 | while ((skb = skb_dequeue(&node->rx_queue)) != NULL) { | |
328 | const struct qrtr_hdr *phdr; | |
329 | u32 dst_node, dst_port; | |
330 | struct qrtr_sock *ipc; | |
331 | u32 src_node; | |
332 | int confirm; | |
333 | ||
334 | phdr = (const struct qrtr_hdr *)skb_transport_header(skb); | |
335 | src_node = le32_to_cpu(phdr->src_node_id); | |
336 | dst_node = le32_to_cpu(phdr->dst_node_id); | |
337 | dst_port = le32_to_cpu(phdr->dst_port_id); | |
338 | confirm = !!phdr->confirm_rx; | |
339 | ||
340 | qrtr_node_assign(node, src_node); | |
341 | ||
342 | ipc = qrtr_port_lookup(dst_port); | |
343 | if (!ipc) { | |
344 | kfree_skb(skb); | |
345 | } else { | |
346 | if (sock_queue_rcv_skb(&ipc->sk, skb)) | |
347 | kfree_skb(skb); | |
348 | ||
349 | qrtr_port_put(ipc); | |
350 | } | |
351 | ||
352 | if (confirm) { | |
353 | skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port); | |
354 | if (!skb) | |
355 | break; | |
356 | if (qrtr_node_enqueue(node, skb)) | |
357 | break; | |
358 | } | |
359 | } | |
360 | } | |
361 | ||
/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 *
 * On success the node is reachable via the broadcast list (and the radix
 * tree if @nid was explicit) and ep->node points at it.
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	INIT_WORK(&node->work, qrtr_node_rx_work);
	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	/* no-op when nid == QRTR_EP_NID_AUTO */
	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);
	ep->node = node;

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
398 | ||
/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 *
 * Detaches the endpoint from its node (so further enqueues fail with
 * -ENODEV), tells the local controller via a BYE message, and drops the
 * registration reference on the node.
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;
	struct sk_buff *skb;

	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	/* Notify the local controller about the event */
	skb = qrtr_alloc_local_bye(node->nid);
	if (skb)
		qrtr_local_enqueue(NULL, skb);

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
421 | ||
/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 *
 * Returns NULL if no socket is bound to @port.
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	/* control port is stored at idr index 0 */
	if (port == QRTR_PORT_CTRL)
		port = 0;

	mutex_lock(&qrtr_port_lock);
	ipc = idr_find(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	mutex_unlock(&qrtr_port_lock);

	return ipc;
}
441 | ||
/* Release acquired socket (pairs with the sock_hold in qrtr_port_lookup). */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}
447 | ||
/* Remove port assignment.
 *
 * Drops the reference taken by qrtr_port_assign() (__sock_put: the caller
 * still holds its own reference) and removes the idr entry.
 */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	int port = ipc->us.sq_port;

	/* control port lives at idr index 0 */
	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	mutex_lock(&qrtr_port_lock);
	idr_remove(&qrtr_ports, port);
	mutex_unlock(&qrtr_port_lock);
}
462 | ||
/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
 *
 * On success a socket reference is taken (released by qrtr_port_remove).
 * Returns -EADDRINUSE if the requested port is taken.
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		rc = idr_alloc(&qrtr_ports, ipc,
			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
			       GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		/* control port is stored at idr index 0 */
		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
	} else {
		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	}
	mutex_unlock(&qrtr_port_lock);

	if (rc == -ENOSPC)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}
504 | ||
/* Bind socket to address.
 *
 * Socket should be locked upon call.
 *
 * @zapped indicates whether the socket is currently unbound; when clear,
 * the previous port assignment is removed after the new one succeeds.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	return 0;
}
535 | ||
536 | /* Auto bind to an ephemeral port. */ | |
537 | static int qrtr_autobind(struct socket *sock) | |
538 | { | |
539 | struct sock *sk = sock->sk; | |
540 | struct sockaddr_qrtr addr; | |
541 | ||
542 | if (!sock_flag(sk, SOCK_ZAPPED)) | |
543 | return 0; | |
544 | ||
545 | addr.sq_family = AF_QIPCRTR; | |
546 | addr.sq_node = qrtr_local_nid; | |
547 | addr.sq_port = 0; | |
548 | ||
549 | return __qrtr_bind(sock, &addr, 1); | |
550 | } | |
551 | ||
/* Bind socket to specified sockaddr (proto_ops .bind handler). */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	/* may only bind to the local node id */
	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}
572 | ||
573 | /* Queue packet to local peer socket. */ | |
574 | static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb) | |
575 | { | |
576 | const struct qrtr_hdr *phdr; | |
577 | struct qrtr_sock *ipc; | |
578 | ||
579 | phdr = (const struct qrtr_hdr *)skb_transport_header(skb); | |
580 | ||
581 | ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id)); | |
582 | if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */ | |
583 | kfree_skb(skb); | |
584 | return -ENODEV; | |
585 | } | |
586 | ||
587 | if (sock_queue_rcv_skb(&ipc->sk, skb)) { | |
588 | qrtr_port_put(ipc); | |
589 | kfree_skb(skb); | |
590 | return -ENOSPC; | |
591 | } | |
592 | ||
593 | qrtr_port_put(ipc); | |
594 | ||
595 | return 0; | |
596 | } | |
597 | ||
/* Queue packet for broadcast.
 *
 * Clones @skb to every registered node, then hands the original to
 * qrtr_local_enqueue() (which consumes it).  Always reports success.
 */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	/* @node is reused as the list cursor; its incoming value is unused */
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn);
	}
	mutex_unlock(&qrtr_node_lock);

	/* the node argument is ignored by qrtr_local_enqueue() */
	qrtr_local_enqueue(node, skb);

	return 0;
}
617 | ||
/* proto_ops .sendmsg handler.
 *
 * Resolves the destination (explicit msg_name, or the connected peer),
 * picks the matching enqueue function (broadcast / local loopback /
 * endpoint), builds the header + padded payload and hands the skb off.
 * Returns the number of payload bytes sent, or a negative error code.
 */
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	size_t plen;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	/* hdr->size is 32-bit on the wire, but cap payloads at 64k */
	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	/* node stays NULL for broadcast and loopback destinations */
	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		enqueue_fn = qrtr_node_enqueue;
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
	}

	/* pad payload to a 4-byte boundary, matching the wire format */
	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reset_transport_header(skb);
	skb_put(skb, len + QRTR_HDR_SIZE);

	hdr = (struct qrtr_hdr *)skb_transport_header(skb);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
	hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(len);
	hdr->dst_node_id = cpu_to_le32(addr->sq_node);
	hdr->dst_port_id = cpu_to_le32(addr->sq_port);

	rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
					 &msg->msg_iter, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (plen != len) {
		/* skb_pad() frees the skb itself on failure */
		rc = skb_pad(skb, plen - len);
		if (rc)
			goto out_node;
		skb_put(skb, plen - len);
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
	} else {
		hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
	}

	/* enqueue_fn consumes the skb in all cases */
	rc = enqueue_fn(node, skb);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
730 | ||
/* proto_ops .recvmsg handler.
 *
 * Dequeues one packet, copies its payload (skipping the router header)
 * into @msg and, if requested, fills in the sender's address.
 * Returns the number of bytes copied, or a negative error code.
 */
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	const struct qrtr_hdr *phdr;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, rc;

	lock_sock(sk);

	/* cannot receive before the socket is bound */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}

	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
	copied = le32_to_cpu(phdr->size);
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = le32_to_cpu(phdr->src_node_id);
		addr->sq_port = le32_to_cpu(phdr->src_port_id);
		msg->msg_namelen = sizeof(*addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}
779 | ||
/* proto_ops .connect handler.
 *
 * Datagram-style "connect": records @saddr as the default peer and marks
 * the socket established.  Auto-binds first if still unbound.
 */
static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	/* drop any previous connection state before re-connecting */
	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}
810 | ||
/* proto_ops .getname handler: report our own or the peer's address. */
static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
			int *len, int peer)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sockaddr_qrtr qaddr;
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (peer) {
		/* peer address only exists once connected */
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		qaddr = ipc->peer;
	} else {
		qaddr = ipc->us;
	}
	release_sock(sk);

	*len = sizeof(qaddr);
	qaddr.sq_family = AF_QIPCRTR;

	memcpy(saddr, &qaddr, sizeof(qaddr));

	return 0;
}
838 | ||
/* proto_ops .ioctl handler: queue sizes, local address and timestamp. */
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		/* bytes of send-buffer space remaining */
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		/* payload size of the next queued packet, if any */
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len - QRTR_HDR_SIZE;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		/* recognized but unsupported on this address family */
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}
901 | ||
/* proto_ops .release handler: tear down the socket on close(). */
static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;

	/* release the bound port, if the socket ever bound one */
	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	/* drop the reference held by the socket itself */
	sock_put(sk);

	return 0;
}
930 | ||
/* proto_ops for AF_QIPCRTR datagram sockets */
static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};
957 | ||
/* net_proto_family .create handler: allocate and initialize a socket. */
static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	/* only datagram sockets are supported */
	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* ZAPPED = not yet bound to a port */
	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}
983 | ||
/* netlink attribute policy for RTM_NEWADDR (qrtr_addr_doit) */
static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
	[IFA_LOCAL] = { .type = NLA_U32 },
};
987 | ||
/* RTM_NEWADDR handler: set the local node id from IFA_LOCAL.
 *
 * Requires both CAP_NET_ADMIN and CAP_SYS_ADMIN; called under rtnl lock.
 */
static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFA_MAX + 1];
	struct ifaddrmsg *ifm;
	int rc;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy, extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);
	if (!tb[IFA_LOCAL])
		return -EINVAL;

	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
	return 0;
}
1014 | ||
/* AF_QIPCRTR socket family registration */
static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};
1020 | ||
/* Module init: register the proto, the socket family and the
 * RTM_NEWADDR handler used to configure the local node id.
 */
static int __init qrtr_proto_init(void)
{
	int rc;

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc) {
		proto_unregister(&qrtr_proto);
		return rc;
	}

	rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL);

	return 0;
}
module_init(qrtr_proto_init);
1040 | ||
/* Module exit: unwind registrations in reverse order of qrtr_proto_init. */
static void __exit qrtr_proto_fini(void)
{
	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");