1#include <linux/bpf.h>
2#include <linux/errno.h>
3#include <linux/errqueue.h>
4#include <linux/file.h>
5#include <linux/in.h>
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/net.h>
9#include <linux/netdevice.h>
10#include <linux/poll.h>
11#include <linux/rculist.h>
12#include <linux/skbuff.h>
13#include <linux/socket.h>
14#include <linux/uaccess.h>
15#include <linux/workqueue.h>
16#include <net/kcm.h>
17#include <net/netns/generic.h>
18#include <net/sock.h>
19#include <net/tcp.h>
20#include <uapi/linux/kcm.h>
21
22unsigned int kcm_net_id;
23
24static struct kmem_cache *kcm_psockp __read_mostly;
25static struct kmem_cache *kcm_muxp __read_mostly;
26static struct workqueue_struct *kcm_wq;
27
28static inline struct kcm_sock *kcm_sk(const struct sock *sk)
29{
30 return (struct kcm_sock *)sk;
31}
32
33static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
34{
35 return (struct kcm_tx_msg *)skb->cb;
36}
37
38static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb)
39{
40 return (struct kcm_rx_msg *)((void *)skb->cb +
41 offsetof(struct qdisc_skb_cb, data));
42}
43
44static void report_csk_error(struct sock *csk, int err)
45{
46 csk->sk_err = EPIPE;
47 csk->sk_error_report(csk);
48}
49
50/* Callback lock held */
51static void kcm_abort_rx_psock(struct kcm_psock *psock, int err,
52 struct sk_buff *skb)
53{
54 struct sock *csk = psock->sk;
55
56 /* Unrecoverable error in receive */
57
58 if (psock->rx_stopped)
59 return;
60
61 psock->rx_stopped = 1;
62 KCM_STATS_INCR(psock->stats.rx_aborts);
63
64 /* Report an error on the lower socket */
65 report_csk_error(csk, err);
66}
67
68static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
69 bool wakeup_kcm)
70{
71 struct sock *csk = psock->sk;
72 struct kcm_mux *mux = psock->mux;
73
74 /* Unrecoverable error in transmit */
75
76 spin_lock_bh(&mux->lock);
77
78 if (psock->tx_stopped) {
79 spin_unlock_bh(&mux->lock);
80 return;
81 }
82
83 psock->tx_stopped = 1;
84 KCM_STATS_INCR(psock->stats.tx_aborts);
85
86 if (!psock->tx_kcm) {
87 /* Take off psocks_avail list */
88 list_del(&psock->psock_avail_list);
89 } else if (wakeup_kcm) {
90 /* In this case psock is being aborted while outside of
91 * write_msgs and psock is reserved. Schedule tx_work
92 * to handle the failure there. Need to commit tx_stopped
93 * before queuing work.
94 */
95 smp_mb();
96
97 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
98 }
99
100 spin_unlock_bh(&mux->lock);
101
102 /* Report error on lower socket */
103 report_csk_error(csk, err);
104}
105
106/* RX mux lock held. */
107static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
108 struct kcm_psock *psock)
109{
110 KCM_STATS_ADD(mux->stats.rx_bytes,
111 psock->stats.rx_bytes - psock->saved_rx_bytes);
112 mux->stats.rx_msgs +=
113 psock->stats.rx_msgs - psock->saved_rx_msgs;
114 psock->saved_rx_msgs = psock->stats.rx_msgs;
115 psock->saved_rx_bytes = psock->stats.rx_bytes;
116}
117
118static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
119 struct kcm_psock *psock)
120{
121 KCM_STATS_ADD(mux->stats.tx_bytes,
122 psock->stats.tx_bytes - psock->saved_tx_bytes);
123 mux->stats.tx_msgs +=
124 psock->stats.tx_msgs - psock->saved_tx_msgs;
125 psock->saved_tx_msgs = psock->stats.tx_msgs;
126 psock->saved_tx_bytes = psock->stats.tx_bytes;
127}
128
129static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
130
131/* KCM is ready to receive messages on its queue-- either the KCM is new or
132 * has become unblocked after being blocked on full socket buffer. Queue any
133 * pending ready messages on a psock. RX mux lock held.
134 */
135static void kcm_rcv_ready(struct kcm_sock *kcm)
136{
137 struct kcm_mux *mux = kcm->mux;
138 struct kcm_psock *psock;
139 struct sk_buff *skb;
140
141 if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
142 return;
143
144 while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
145 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
146 /* Assuming buffer limit has been reached */
147 skb_queue_head(&mux->rx_hold_queue, skb);
148 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
149 return;
150 }
151 }
152
153 while (!list_empty(&mux->psocks_ready)) {
154 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
155 psock_ready_list);
156
157 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
158 /* Assuming buffer limit has been reached */
159 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
160 return;
161 }
162
163 /* Consumed the ready message on the psock. Schedule rx_work to
164 * get more messages.
165 */
166 list_del(&psock->psock_ready_list);
167 psock->ready_rx_msg = NULL;
168
169 /* Commit clearing of ready_rx_msg for queuing work */
170 smp_mb();
171
172 queue_work(kcm_wq, &psock->rx_work);
173 }
174
175 /* Buffer limit is okay now, add to ready list */
176 list_add_tail(&kcm->wait_rx_list,
177 &kcm->mux->kcm_rx_waiters);
178 kcm->rx_wait = true;
179}
180
181static void kcm_rfree(struct sk_buff *skb)
182{
183 struct sock *sk = skb->sk;
184 struct kcm_sock *kcm = kcm_sk(sk);
185 struct kcm_mux *mux = kcm->mux;
186 unsigned int len = skb->truesize;
187
188 sk_mem_uncharge(sk, len);
189 atomic_sub(len, &sk->sk_rmem_alloc);
190
191 /* For reading rx_wait and rx_psock without holding lock */
192 smp_mb__after_atomic();
193
194 if (!kcm->rx_wait && !kcm->rx_psock &&
195 sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
196 spin_lock_bh(&mux->rx_lock);
197 kcm_rcv_ready(kcm);
198 spin_unlock_bh(&mux->rx_lock);
199 }
200}
201
202static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
203{
204 struct sk_buff_head *list = &sk->sk_receive_queue;
205
206 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
207 return -ENOMEM;
208
209 if (!sk_rmem_schedule(sk, skb, skb->truesize))
210 return -ENOBUFS;
211
212 skb->dev = NULL;
213
214 skb_orphan(skb);
215 skb->sk = sk;
216 skb->destructor = kcm_rfree;
217 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
218 sk_mem_charge(sk, skb->truesize);
219
220 skb_queue_tail(list, skb);
221
222 if (!sock_flag(sk, SOCK_DEAD))
223 sk->sk_data_ready(sk);
224
225 return 0;
226}
227
228/* Requeue received messages for a kcm socket to other kcm sockets. This is
229 * called when a kcm socket is receive disabled.
230 * RX mux lock held.
231 */
232static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
233{
234 struct sk_buff *skb;
235 struct kcm_sock *kcm;
236
237 while ((skb = __skb_dequeue(head))) {
238 /* Reset destructor to avoid calling kcm_rcv_ready */
239 skb->destructor = sock_rfree;
240 skb_orphan(skb);
241try_again:
242 if (list_empty(&mux->kcm_rx_waiters)) {
243 skb_queue_tail(&mux->rx_hold_queue, skb);
244 continue;
245 }
246
247 kcm = list_first_entry(&mux->kcm_rx_waiters,
248 struct kcm_sock, wait_rx_list);
249
250 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
251 /* Should mean socket buffer full */
252 list_del(&kcm->wait_rx_list);
253 kcm->rx_wait = false;
254
255 /* Commit rx_wait to read in kcm_free */
256 smp_wmb();
257
258 goto try_again;
259 }
260 }
261}
262
263/* Lower sock lock held */
264static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
265 struct sk_buff *head)
266{
267 struct kcm_mux *mux = psock->mux;
268 struct kcm_sock *kcm;
269
270 WARN_ON(psock->ready_rx_msg);
271
272 if (psock->rx_kcm)
273 return psock->rx_kcm;
274
275 spin_lock_bh(&mux->rx_lock);
276
277 if (psock->rx_kcm) {
278 spin_unlock_bh(&mux->rx_lock);
279 return psock->rx_kcm;
280 }
281
282 kcm_update_rx_mux_stats(mux, psock);
283
284 if (list_empty(&mux->kcm_rx_waiters)) {
285 psock->ready_rx_msg = head;
286 list_add_tail(&psock->psock_ready_list,
287 &mux->psocks_ready);
288 spin_unlock_bh(&mux->rx_lock);
289 return NULL;
290 }
291
292 kcm = list_first_entry(&mux->kcm_rx_waiters,
293 struct kcm_sock, wait_rx_list);
294 list_del(&kcm->wait_rx_list);
295 kcm->rx_wait = false;
296
297 psock->rx_kcm = kcm;
298 kcm->rx_psock = psock;
299
300 spin_unlock_bh(&mux->rx_lock);
301
302 return kcm;
303}
304
305static void kcm_done(struct kcm_sock *kcm);
306
307static void kcm_done_work(struct work_struct *w)
308{
309 kcm_done(container_of(w, struct kcm_sock, done_work));
310}
311
312/* Lower sock held */
313static void unreserve_rx_kcm(struct kcm_psock *psock,
314 bool rcv_ready)
315{
316 struct kcm_sock *kcm = psock->rx_kcm;
317 struct kcm_mux *mux = psock->mux;
318
319 if (!kcm)
320 return;
321
322 spin_lock_bh(&mux->rx_lock);
323
324 psock->rx_kcm = NULL;
325 kcm->rx_psock = NULL;
326
327 /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
328 * kcm_rfree
329 */
330 smp_mb();
331
332 if (unlikely(kcm->done)) {
333 spin_unlock_bh(&mux->rx_lock);
334
335 /* Need to run kcm_done in a task since we need to acquire
336 * callback locks which may already be held here.
337 */
338 INIT_WORK(&kcm->done_work, kcm_done_work);
339 schedule_work(&kcm->done_work);
340 return;
341 }
342
343 if (unlikely(kcm->rx_disabled)) {
344 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
345 } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
346 /* Check for degenerative race with rx_wait that all
347 * data was dequeued (accounted for in kcm_rfree).
348 */
349 kcm_rcv_ready(kcm);
350 }
351 spin_unlock_bh(&mux->rx_lock);
352}
353
354/* Macro to invoke filter function. */
355#define KCM_RUN_FILTER(prog, ctx) \
356 (*prog->bpf_func)(ctx, prog->insnsi)
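/* Illustrative note (not part of the original file): the program attached to
 * a psock is a BPF_PROG_TYPE_SOCKET_FILTER program whose return value is
 * taken as the full length of the message starting at kcm_rx_msg(skb)->offset;
 * returning 0 means more header bytes are needed (see kcm_tcp_recv below).
 * A minimal sketch, assuming messages begin with a 2-byte big-endian length
 * field that does not count itself, and using the load_half() helper from the
 * kernel's BPF samples (the name kcm_parse and the framing are assumptions):
 *
 *	int kcm_parse(struct __sk_buff *skb)
 *	{
 *		struct kcm_rx_msg *msg = (struct kcm_rx_msg *)skb->cb;
 *
 *		return load_half(skb, msg->offset) + 2;
 *	}
 */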
357
358/* Lower socket lock held */
359static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
360 unsigned int orig_offset, size_t orig_len)
361{
362 struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data;
363 struct kcm_rx_msg *rxm;
364 struct kcm_sock *kcm;
365 struct sk_buff *head, *skb;
366 size_t eaten = 0, cand_len;
367 ssize_t extra;
368 int err;
369 bool cloned_orig = false;
370
371 if (psock->ready_rx_msg)
372 return 0;
373
374 head = psock->rx_skb_head;
375 if (head) {
376 /* Message already in progress */
377
378 if (unlikely(orig_offset)) {
379 /* Getting data with a non-zero offset when a message is
380 * in progress is not expected. If it does happen, we
381 * need to clone and pull since we can't deal with
382 * offsets in the skbs for a message except in the head.
383 */
384 orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
385 if (!orig_skb) {
386 KCM_STATS_INCR(psock->stats.rx_mem_fail);
387 desc->error = -ENOMEM;
388 return 0;
389 }
390 if (!pskb_pull(orig_skb, orig_offset)) {
391 KCM_STATS_INCR(psock->stats.rx_mem_fail);
392 kfree_skb(orig_skb);
393 desc->error = -ENOMEM;
394 return 0;
395 }
396 cloned_orig = true;
397 orig_offset = 0;
398 }
399
400 if (!psock->rx_skb_nextp) {
401 /* We are going to append to the frags_list of head.
402 * Need to unshare the frag_list.
403 */
404 err = skb_unclone(head, GFP_ATOMIC);
405 if (err) {
406 KCM_STATS_INCR(psock->stats.rx_mem_fail);
407 desc->error = err;
408 return 0;
409 }
410
411 if (unlikely(skb_shinfo(head)->frag_list)) {
412 /* We can't append to an sk_buff that already
413 * has a frag_list. We create a new head, point
414 * the frag_list of that to the old head, and
415 * then are able to use the old head->next for
416 * appending to the message.
417 */
418 if (WARN_ON(head->next)) {
419 desc->error = -EINVAL;
420 return 0;
421 }
422
423 skb = alloc_skb(0, GFP_ATOMIC);
424 if (!skb) {
425 KCM_STATS_INCR(psock->stats.rx_mem_fail);
426 desc->error = -ENOMEM;
427 return 0;
428 }
429 skb->len = head->len;
430 skb->data_len = head->len;
431 skb->truesize = head->truesize;
432 *kcm_rx_msg(skb) = *kcm_rx_msg(head);
433 psock->rx_skb_nextp = &head->next;
434 skb_shinfo(skb)->frag_list = head;
435 psock->rx_skb_head = skb;
436 head = skb;
437 } else {
438 psock->rx_skb_nextp =
439 &skb_shinfo(head)->frag_list;
440 }
441 }
442 }
443
444 while (eaten < orig_len) {
445 /* Always clone since we will consume something */
446 skb = skb_clone(orig_skb, GFP_ATOMIC);
447 if (!skb) {
448 KCM_STATS_INCR(psock->stats.rx_mem_fail);
449 desc->error = -ENOMEM;
450 break;
451 }
452
453 cand_len = orig_len - eaten;
454
455 head = psock->rx_skb_head;
456 if (!head) {
457 head = skb;
458 psock->rx_skb_head = head;
459 /* Will set rx_skb_nextp on next packet if needed */
460 psock->rx_skb_nextp = NULL;
461 rxm = kcm_rx_msg(head);
462 memset(rxm, 0, sizeof(*rxm));
463 rxm->offset = orig_offset + eaten;
464 } else {
465 /* Unclone since we may be appending to an skb that we
466 * already share a frag_list with.
467 */
468 err = skb_unclone(skb, GFP_ATOMIC);
469 if (err) {
470 KCM_STATS_INCR(psock->stats.rx_mem_fail);
471 desc->error = err;
472 break;
473 }
474
475 rxm = kcm_rx_msg(head);
476 *psock->rx_skb_nextp = skb;
477 psock->rx_skb_nextp = &skb->next;
478 head->data_len += skb->len;
479 head->len += skb->len;
480 head->truesize += skb->truesize;
481 }
482
483 if (!rxm->full_len) {
484 ssize_t len;
485
486 len = KCM_RUN_FILTER(psock->bpf_prog, head);
487
488 if (!len) {
489 /* Need more header to determine length */
490 rxm->accum_len += cand_len;
491 eaten += cand_len;
492 KCM_STATS_INCR(psock->stats.rx_need_more_hdr);
493 WARN_ON(eaten != orig_len);
494 break;
495 } else if (len <= (ssize_t)head->len -
496 skb->len - rxm->offset) {
497 /* Length must be into new skb (and also
498 * greater than zero)
499 */
500 KCM_STATS_INCR(psock->stats.rx_bad_hdr_len);
501 desc->error = -EPROTO;
502 psock->rx_skb_head = NULL;
503 kcm_abort_rx_psock(psock, EPROTO, head);
504 break;
505 }
506
507 rxm->full_len = len;
508 }
509
510 extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len;
511
512 if (extra < 0) {
513 /* Message not complete yet. */
514 rxm->accum_len += cand_len;
515 eaten += cand_len;
516 WARN_ON(eaten != orig_len);
517 break;
518 }
519
520 /* Positive extra indicates more bytes than needed for the
521 * message
522 */
523
524 WARN_ON(extra > cand_len);
525
526 eaten += (cand_len - extra);
527
528 /* Hurray, we have a new message! */
529 psock->rx_skb_head = NULL;
530 KCM_STATS_INCR(psock->stats.rx_msgs);
531
532try_queue:
533 kcm = reserve_rx_kcm(psock, head);
534 if (!kcm) {
535 /* Unable to reserve a KCM, message is held in psock. */
536 break;
537 }
538
539 if (kcm_queue_rcv_skb(&kcm->sk, head)) {
540 /* Should mean socket buffer full */
541 unreserve_rx_kcm(psock, false);
542 goto try_queue;
543 }
544 }
545
546 if (cloned_orig)
547 kfree_skb(orig_skb);
548
549 KCM_STATS_ADD(psock->stats.rx_bytes, eaten);
550
551 return eaten;
552}
553
554/* Called with lock held on lower socket */
555static int psock_tcp_read_sock(struct kcm_psock *psock)
556{
557 read_descriptor_t desc;
558
559 desc.arg.data = psock;
560 desc.error = 0;
561 desc.count = 1; /* give more than one skb per call */
562
563 /* sk should be locked here, so okay to do tcp_read_sock */
564 tcp_read_sock(psock->sk, &desc, kcm_tcp_recv);
565
566 unreserve_rx_kcm(psock, true);
567
568 return desc.error;
569}
570
571/* Lower sock lock held */
572static void psock_tcp_data_ready(struct sock *sk)
573{
574 struct kcm_psock *psock;
575
576 read_lock_bh(&sk->sk_callback_lock);
577
578 psock = (struct kcm_psock *)sk->sk_user_data;
579 if (unlikely(!psock || psock->rx_stopped))
580 goto out;
581
582 if (psock->ready_rx_msg)
583 goto out;
584
585 if (psock_tcp_read_sock(psock) == -ENOMEM)
586 queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
587
588out:
589 read_unlock_bh(&sk->sk_callback_lock);
590}
591
592static void do_psock_rx_work(struct kcm_psock *psock)
593{
594 read_descriptor_t rd_desc;
595 struct sock *csk = psock->sk;
596
597 /* We need the read lock to synchronize with psock_tcp_data_ready. We
598 * need the socket lock for calling tcp_read_sock.
599 */
600 lock_sock(csk);
601 read_lock_bh(&csk->sk_callback_lock);
602
603 if (unlikely(csk->sk_user_data != psock))
604 goto out;
605
606 if (unlikely(psock->rx_stopped))
607 goto out;
608
609 if (psock->ready_rx_msg)
610 goto out;
611
612 rd_desc.arg.data = psock;
613
614 if (psock_tcp_read_sock(psock) == -ENOMEM)
615 queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
616
617out:
618 read_unlock_bh(&csk->sk_callback_lock);
619 release_sock(csk);
620}
621
622static void psock_rx_work(struct work_struct *w)
623{
624 do_psock_rx_work(container_of(w, struct kcm_psock, rx_work));
625}
626
627static void psock_rx_delayed_work(struct work_struct *w)
628{
629 do_psock_rx_work(container_of(w, struct kcm_psock,
630 rx_delayed_work.work));
631}
632
633static void psock_tcp_state_change(struct sock *sk)
634{
635 /* TCP only does a POLLIN for a half close. Do a POLLHUP here
636 * since application will normally not poll with POLLIN
637 * on the TCP sockets.
638 */
639
640 report_csk_error(sk, EPIPE);
641}
642
643static void psock_tcp_write_space(struct sock *sk)
644{
645 struct kcm_psock *psock;
646 struct kcm_mux *mux;
647 struct kcm_sock *kcm;
648
649 read_lock_bh(&sk->sk_callback_lock);
650
651 psock = (struct kcm_psock *)sk->sk_user_data;
652 if (unlikely(!psock))
653 goto out;
654
655 mux = psock->mux;
656
657 spin_lock_bh(&mux->lock);
658
659 /* Check if the socket is reserved so someone is waiting for sending. */
660 kcm = psock->tx_kcm;
661 if (kcm)
662 queue_work(kcm_wq, &kcm->tx_work);
663
664 spin_unlock_bh(&mux->lock);
665out:
666 read_unlock_bh(&sk->sk_callback_lock);
667}
668
669static void unreserve_psock(struct kcm_sock *kcm);
670
671/* kcm sock is locked. */
672static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
673{
674 struct kcm_mux *mux = kcm->mux;
675 struct kcm_psock *psock;
676
677 psock = kcm->tx_psock;
678
679 smp_rmb(); /* Must read tx_psock before tx_wait */
680
681 if (psock) {
682 WARN_ON(kcm->tx_wait);
683 if (unlikely(psock->tx_stopped))
684 unreserve_psock(kcm);
685 else
686 return kcm->tx_psock;
687 }
688
689 spin_lock_bh(&mux->lock);
690
691 /* Check again under lock to see if a psock was reserved for this
692 * kcm via psock_now_avail.
693 */
694 psock = kcm->tx_psock;
695 if (unlikely(psock)) {
696 WARN_ON(kcm->tx_wait);
697 spin_unlock_bh(&mux->lock);
698 return kcm->tx_psock;
699 }
700
701 if (!list_empty(&mux->psocks_avail)) {
702 psock = list_first_entry(&mux->psocks_avail,
703 struct kcm_psock,
704 psock_avail_list);
705 list_del(&psock->psock_avail_list);
706 if (kcm->tx_wait) {
707 list_del(&kcm->wait_psock_list);
708 kcm->tx_wait = false;
709 }
710 kcm->tx_psock = psock;
711 psock->tx_kcm = kcm;
712 KCM_STATS_INCR(psock->stats.reserved);
713 } else if (!kcm->tx_wait) {
714 list_add_tail(&kcm->wait_psock_list,
715 &mux->kcm_tx_waiters);
716 kcm->tx_wait = true;
717 }
718
719 spin_unlock_bh(&mux->lock);
720
721 return psock;
722}
723
724/* mux lock held */
725static void psock_now_avail(struct kcm_psock *psock)
726{
727 struct kcm_mux *mux = psock->mux;
728 struct kcm_sock *kcm;
729
730 if (list_empty(&mux->kcm_tx_waiters)) {
731 list_add_tail(&psock->psock_avail_list,
732 &mux->psocks_avail);
733 } else {
734 kcm = list_first_entry(&mux->kcm_tx_waiters,
735 struct kcm_sock,
736 wait_psock_list);
737 list_del(&kcm->wait_psock_list);
738 kcm->tx_wait = false;
739 psock->tx_kcm = kcm;
740
741 /* Commit before changing tx_psock since that is read in
742 * reserve_psock before queuing work.
743 */
744 smp_mb();
745
746 kcm->tx_psock = psock;
747 KCM_STATS_INCR(psock->stats.reserved);
748 queue_work(kcm_wq, &kcm->tx_work);
749 }
750}
751
752/* kcm sock is locked. */
753static void unreserve_psock(struct kcm_sock *kcm)
754{
755 struct kcm_psock *psock;
756 struct kcm_mux *mux = kcm->mux;
757
758 spin_lock_bh(&mux->lock);
759
760 psock = kcm->tx_psock;
761
762 if (WARN_ON(!psock)) {
763 spin_unlock_bh(&mux->lock);
764 return;
765 }
766
767 smp_rmb(); /* Read tx_psock before tx_wait */
768
769 kcm_update_tx_mux_stats(mux, psock);
770
771 WARN_ON(kcm->tx_wait);
772
773 kcm->tx_psock = NULL;
774 psock->tx_kcm = NULL;
775 KCM_STATS_INCR(psock->stats.unreserved);
776
777 if (unlikely(psock->tx_stopped)) {
778 if (psock->done) {
779 /* Deferred free */
780 list_del(&psock->psock_list);
781 mux->psocks_cnt--;
782 sock_put(psock->sk);
783 fput(psock->sk->sk_socket->file);
784 kmem_cache_free(kcm_psockp, psock);
785 }
786
787 /* Don't put back on available list */
788
789 spin_unlock_bh(&mux->lock);
790
791 return;
792 }
793
794 psock_now_avail(psock);
795
796 spin_unlock_bh(&mux->lock);
797}
798
799static void kcm_report_tx_retry(struct kcm_sock *kcm)
800{
801 struct kcm_mux *mux = kcm->mux;
802
803 spin_lock_bh(&mux->lock);
804 KCM_STATS_INCR(mux->stats.tx_retries);
805 spin_unlock_bh(&mux->lock);
806}
807
808/* Write any messages ready on the kcm socket. Called with kcm sock lock
809 * held. Return bytes actually sent or error.
810 */
811static int kcm_write_msgs(struct kcm_sock *kcm)
812{
813 struct sock *sk = &kcm->sk;
814 struct kcm_psock *psock;
815 struct sk_buff *skb, *head;
816 struct kcm_tx_msg *txm;
817 unsigned short fragidx, frag_offset;
818 unsigned int sent, total_sent = 0;
819 int ret = 0;
820
821 kcm->tx_wait_more = false;
822 psock = kcm->tx_psock;
823 if (unlikely(psock && psock->tx_stopped)) {
824 /* A reserved psock was aborted asynchronously. Unreserve
825 * it and we'll retry the message.
826 */
827 unreserve_psock(kcm);
828 kcm_report_tx_retry(kcm);
829 if (skb_queue_empty(&sk->sk_write_queue))
830 return 0;
831
832 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
833
834 } else if (skb_queue_empty(&sk->sk_write_queue)) {
835 return 0;
836 }
837
838 head = skb_peek(&sk->sk_write_queue);
839 txm = kcm_tx_msg(head);
840
841 if (txm->sent) {
842 /* Send of first skbuff in queue already in progress */
843 if (WARN_ON(!psock)) {
844 ret = -EINVAL;
845 goto out;
846 }
847 sent = txm->sent;
848 frag_offset = txm->frag_offset;
849 fragidx = txm->fragidx;
850 skb = txm->frag_skb;
851
852 goto do_frag;
853 }
854
855try_again:
856 psock = reserve_psock(kcm);
857 if (!psock)
858 goto out;
859
860 do {
861 skb = head;
862 txm = kcm_tx_msg(head);
863 sent = 0;
864
865do_frag_list:
866 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
867 ret = -EINVAL;
868 goto out;
869 }
870
871 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
872 fragidx++) {
873 skb_frag_t *frag;
874
875 frag_offset = 0;
876do_frag:
877 frag = &skb_shinfo(skb)->frags[fragidx];
878 if (WARN_ON(!frag->size)) {
879 ret = -EINVAL;
880 goto out;
881 }
882
883 ret = kernel_sendpage(psock->sk->sk_socket,
884 frag->page.p,
885 frag->page_offset + frag_offset,
886 frag->size - frag_offset,
887 MSG_DONTWAIT);
888 if (ret <= 0) {
889 if (ret == -EAGAIN) {
890 /* Save state to try again when there's
891 * write space on the socket
892 */
893 txm->sent = sent;
894 txm->frag_offset = frag_offset;
895 txm->fragidx = fragidx;
896 txm->frag_skb = skb;
897
898 ret = 0;
899 goto out;
900 }
901
902 /* Hard failure in sending message, abort this
903 * psock since it has lost framing
904 * synchronization and retry sending the
905 * message from the beginning.
906 */
907 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
908 true);
909 unreserve_psock(kcm);
910
911 txm->sent = 0;
912 kcm_report_tx_retry(kcm);
913 ret = 0;
914
915 goto try_again;
916 }
917
918 sent += ret;
919 frag_offset += ret;
920 KCM_STATS_ADD(psock->stats.tx_bytes, ret);
921 if (frag_offset < frag->size) {
922 /* Not finished with this frag */
923 goto do_frag;
924 }
925 }
926
927 if (skb == head) {
928 if (skb_has_frag_list(skb)) {
929 skb = skb_shinfo(skb)->frag_list;
930 goto do_frag_list;
931 }
932 } else if (skb->next) {
933 skb = skb->next;
934 goto do_frag_list;
935 }
936
937 /* Successfully sent the whole packet, account for it. */
938 skb_dequeue(&sk->sk_write_queue);
939 kfree_skb(head);
940 sk->sk_wmem_queued -= sent;
941 total_sent += sent;
942 KCM_STATS_INCR(psock->stats.tx_msgs);
943 } while ((head = skb_peek(&sk->sk_write_queue)));
944out:
945 if (!head) {
946 /* Done with all queued messages. */
947 WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
948 unreserve_psock(kcm);
949 }
950
951 /* Check if write space is available */
952 sk->sk_write_space(sk);
953
954 return total_sent ? : ret;
955}
956
957static void kcm_tx_work(struct work_struct *w)
958{
959 struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
960 struct sock *sk = &kcm->sk;
961 int err;
962
963 lock_sock(sk);
964
965 /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
966 * aborts
967 */
968 err = kcm_write_msgs(kcm);
969 if (err < 0) {
970 /* Hard failure in write, report error on KCM socket */
971 pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
972 report_csk_error(&kcm->sk, -err);
973 goto out;
974 }
975
976 /* Primarily for SOCK_SEQPACKET sockets */
977 if (likely(sk->sk_socket) &&
978 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
979 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
980 sk->sk_write_space(sk);
981 }
982
983out:
984 release_sock(sk);
985}
986
987static void kcm_push(struct kcm_sock *kcm)
988{
989 if (kcm->tx_wait_more)
990 kcm_write_msgs(kcm);
991}
992
993static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
994{
995 struct sock *sk = sock->sk;
996 struct kcm_sock *kcm = kcm_sk(sk);
997 struct sk_buff *skb = NULL, *head = NULL;
998 size_t copy, copied = 0;
999 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1000 int eor = (sock->type == SOCK_DGRAM) ?
1001 !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
1002 int err = -EPIPE;
1003
1004 lock_sock(sk);
1005
1006 /* Per tcp_sendmsg this should be in poll */
1007 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1008
1009 if (sk->sk_err)
1010 goto out_error;
1011
1012 if (kcm->seq_skb) {
1013 /* Previously opened message */
1014 head = kcm->seq_skb;
1015 skb = kcm_tx_msg(head)->last_skb;
1016 goto start;
1017 }
1018
1019 /* Call the sk_stream functions to manage the sndbuf mem. */
1020 if (!sk_stream_memory_free(sk)) {
1021 kcm_push(kcm);
1022 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1023 err = sk_stream_wait_memory(sk, &timeo);
1024 if (err)
1025 goto out_error;
1026 }
1027
1028 /* New message, alloc head skb */
1029 head = alloc_skb(0, sk->sk_allocation);
1030 while (!head) {
1031 kcm_push(kcm);
1032 err = sk_stream_wait_memory(sk, &timeo);
1033 if (err)
1034 goto out_error;
1035
1036 head = alloc_skb(0, sk->sk_allocation);
1037 }
1038
1039 skb = head;
1040
1041 /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
1042 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
1043 */
1044 skb->ip_summed = CHECKSUM_UNNECESSARY;
1045
1046start:
1047 while (msg_data_left(msg)) {
1048 bool merge = true;
1049 int i = skb_shinfo(skb)->nr_frags;
1050 struct page_frag *pfrag = sk_page_frag(sk);
1051
1052 if (!sk_page_frag_refill(sk, pfrag))
1053 goto wait_for_memory;
1054
1055 if (!skb_can_coalesce(skb, i, pfrag->page,
1056 pfrag->offset)) {
1057 if (i == MAX_SKB_FRAGS) {
1058 struct sk_buff *tskb;
1059
1060 tskb = alloc_skb(0, sk->sk_allocation);
1061 if (!tskb)
1062 goto wait_for_memory;
1063
1064 if (head == skb)
1065 skb_shinfo(head)->frag_list = tskb;
1066 else
1067 skb->next = tskb;
1068
1069 skb = tskb;
1070 skb->ip_summed = CHECKSUM_UNNECESSARY;
1071 continue;
1072 }
1073 merge = false;
1074 }
1075
1076 copy = min_t(int, msg_data_left(msg),
1077 pfrag->size - pfrag->offset);
1078
1079 if (!sk_wmem_schedule(sk, copy))
1080 goto wait_for_memory;
1081
1082 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1083 pfrag->page,
1084 pfrag->offset,
1085 copy);
1086 if (err)
1087 goto out_error;
1088
1089 /* Update the skb. */
1090 if (merge) {
1091 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1092 } else {
1093 skb_fill_page_desc(skb, i, pfrag->page,
1094 pfrag->offset, copy);
1095 get_page(pfrag->page);
1096 }
1097
1098 pfrag->offset += copy;
1099 copied += copy;
1100 if (head != skb) {
1101 head->len += copy;
1102 head->data_len += copy;
1103 }
1104
1105 continue;
1106
1107wait_for_memory:
1108 kcm_push(kcm);
1109 err = sk_stream_wait_memory(sk, &timeo);
1110 if (err)
1111 goto out_error;
1112 }
1113
1114 if (eor) {
1115 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1116
1117 /* Message complete, queue it on send buffer */
1118 __skb_queue_tail(&sk->sk_write_queue, head);
1119 kcm->seq_skb = NULL;
1120 KCM_STATS_INCR(kcm->stats.tx_msgs);
1121
1122 if (msg->msg_flags & MSG_BATCH) {
1123 kcm->tx_wait_more = true;
1124 } else if (kcm->tx_wait_more || not_busy) {
1125 err = kcm_write_msgs(kcm);
1126 if (err < 0) {
1127 /* We got a hard error in write_msgs but have
1128 * already queued this message. Report an error
1129 * in the socket, but don't affect return value
1130 * from sendmsg
1131 */
1132 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1133 report_csk_error(&kcm->sk, -err);
1134 }
1135 }
1136 } else {
1137 /* Message not complete, save state */
1138partial_message:
1139 kcm->seq_skb = head;
1140 kcm_tx_msg(head)->last_skb = skb;
1141 }
1142
1143 KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1144
1145 release_sock(sk);
1146 return copied;
1147
1148out_error:
1149 kcm_push(kcm);
1150
1151 if (copied && sock->type == SOCK_SEQPACKET) {
1152 /* Wrote some bytes before encountering an
1153 * error, return partial success.
1154 */
1155 goto partial_message;
1156 }
1157
1158 if (head != kcm->seq_skb)
1159 kfree_skb(head);
1160
1161 err = sk_stream_error(sk, msg->msg_flags, err);
1162
1163 /* make sure we wake any epoll edge trigger waiter */
1164 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1165 sk->sk_write_space(sk);
1166
1167 release_sock(sk);
1168 return err;
1169}
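/* Illustrative note (not part of the original file): as the eor logic above
 * shows, a SOCK_DGRAM KCM socket treats each sendmsg() as a complete message
 * unless MSG_MORE is set, while a SOCK_SEQPACKET socket completes a message
 * only when MSG_EOR is set; MSG_BATCH defers transmission so several queued
 * messages can be flushed by a later send. A sketch, assuming "fd" is an
 * AF_KCM SOCK_SEQPACKET socket and "msg" a filled-in struct msghdr:
 *
 *	sendmsg(fd, &msg, MSG_EOR | MSG_BATCH);	// complete message, hold tx
 *	sendmsg(fd, &msg, MSG_EOR);		// complete message, flush both
 */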
1170
1171static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
1172 long timeo, int *err)
1173{
1174 struct sk_buff *skb;
1175
1176 while (!(skb = skb_peek(&sk->sk_receive_queue))) {
1177 if (sk->sk_err) {
1178 *err = sock_error(sk);
1179 return NULL;
1180 }
1181
1182 if (sock_flag(sk, SOCK_DONE))
1183 return NULL;
1184
1185 if ((flags & MSG_DONTWAIT) || !timeo) {
1186 *err = -EAGAIN;
1187 return NULL;
1188 }
1189
1190 sk_wait_data(sk, &timeo, NULL);
1191
1192 /* Handle signals */
1193 if (signal_pending(current)) {
1194 *err = sock_intr_errno(timeo);
1195 return NULL;
1196 }
1197 }
1198
1199 return skb;
1200}
1201
1202static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1203 size_t len, int flags)
1204{
1205 struct sock *sk = sock->sk;
1206 struct kcm_sock *kcm = kcm_sk(sk);
1207 int err = 0;
1208 long timeo;
1209 struct kcm_rx_msg *rxm;
1210 int copied = 0;
1211 struct sk_buff *skb;
1212
1213 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1214
1215 lock_sock(sk);
1216
1217 skb = kcm_wait_data(sk, flags, timeo, &err);
1218 if (!skb)
1219 goto out;
1220
1221 /* Okay, have a message on the receive queue */
1222
1223 rxm = kcm_rx_msg(skb);
1224
1225 if (len > rxm->full_len)
1226 len = rxm->full_len;
1227
1228 err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
1229 if (err < 0)
1230 goto out;
1231
1232 copied = len;
1233 if (likely(!(flags & MSG_PEEK))) {
1234 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1235 if (copied < rxm->full_len) {
1236 if (sock->type == SOCK_DGRAM) {
1237 /* Truncated message */
1238 msg->msg_flags |= MSG_TRUNC;
1239 goto msg_finished;
1240 }
1241 rxm->offset += copied;
1242 rxm->full_len -= copied;
1243 } else {
1244msg_finished:
1245 /* Finished with message */
1246 msg->msg_flags |= MSG_EOR;
1247 KCM_STATS_INCR(kcm->stats.rx_msgs);
1248 skb_unlink(skb, &sk->sk_receive_queue);
1249 kfree_skb(skb);
1250 }
1251 }
1252
1253out:
1254 release_sock(sk);
1255
1256 return copied ? : err;
1257}
1258
1259static ssize_t kcm_sock_splice(struct sock *sk,
1260 struct pipe_inode_info *pipe,
1261 struct splice_pipe_desc *spd)
1262{
1263 int ret;
1264
1265 release_sock(sk);
1266 ret = splice_to_pipe(pipe, spd);
1267 lock_sock(sk);
1268
1269 return ret;
1270}
1271
1272static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1273 struct pipe_inode_info *pipe, size_t len,
1274 unsigned int flags)
1275{
1276 struct sock *sk = sock->sk;
1277 struct kcm_sock *kcm = kcm_sk(sk);
1278 long timeo;
1279 struct kcm_rx_msg *rxm;
1280 int err = 0;
1281 size_t copied;
1282 struct sk_buff *skb;
1283
1284 /* Only support splice for SOCK_SEQPACKET */
1285
1286 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1287
1288 lock_sock(sk);
1289
1290 skb = kcm_wait_data(sk, flags, timeo, &err);
1291 if (!skb)
1292 goto err_out;
1293
1294 /* Okay, have a message on the receive queue */
1295
1296 rxm = kcm_rx_msg(skb);
1297
1298 if (len > rxm->full_len)
1299 len = rxm->full_len;
1300
1301 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags,
1302 kcm_sock_splice);
1303 if (copied < 0) {
1304 err = copied;
1305 goto err_out;
1306 }
1307
1308 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1309
1310 rxm->offset += copied;
1311 rxm->full_len -= copied;
1312
1313 /* We have no way to return MSG_EOR. If all the bytes have been
1314 * read we still leave the message in the receive socket buffer.
1315 * A subsequent recvmsg needs to be done to return MSG_EOR and
1316 * finish reading the message.
1317 */
1318
1319 release_sock(sk);
1320
1321 return copied;
1322
1323err_out:
1324 release_sock(sk);
1325
1326 return err;
1327}
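/* Illustrative note (not part of the original file): splice() can move part
 * of a message out of a SOCK_SEQPACKET KCM socket, but per the comment above
 * a later recvmsg() is still needed to consume the message and observe
 * MSG_EOR. A sketch, assuming "fd" is an AF_KCM SOCK_SEQPACKET socket and
 * "pfd[1]" is the write end of a pipe:
 *
 *	splice(fd, NULL, pfd[1], NULL, 4096, SPLICE_F_MOVE);
 */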
1328
1329/* kcm sock lock held */
1330static void kcm_recv_disable(struct kcm_sock *kcm)
1331{
1332 struct kcm_mux *mux = kcm->mux;
1333
1334 if (kcm->rx_disabled)
1335 return;
1336
1337 spin_lock_bh(&mux->rx_lock);
1338
1339 kcm->rx_disabled = 1;
1340
1341 /* If a psock is reserved we'll do cleanup in unreserve */
1342 if (!kcm->rx_psock) {
1343 if (kcm->rx_wait) {
1344 list_del(&kcm->wait_rx_list);
1345 kcm->rx_wait = false;
1346 }
1347
1348 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1349 }
1350
1351 spin_unlock_bh(&mux->rx_lock);
1352}
1353
1354/* kcm sock lock held */
1355static void kcm_recv_enable(struct kcm_sock *kcm)
1356{
1357 struct kcm_mux *mux = kcm->mux;
1358
1359 if (!kcm->rx_disabled)
1360 return;
1361
1362 spin_lock_bh(&mux->rx_lock);
1363
1364 kcm->rx_disabled = 0;
1365 kcm_rcv_ready(kcm);
1366
1367 spin_unlock_bh(&mux->rx_lock);
1368}
1369
1370static int kcm_setsockopt(struct socket *sock, int level, int optname,
1371 char __user *optval, unsigned int optlen)
1372{
1373 struct kcm_sock *kcm = kcm_sk(sock->sk);
1374 int val, valbool;
1375 int err = 0;
1376
1377 if (level != SOL_KCM)
1378 return -ENOPROTOOPT;
1379
1380 if (optlen < sizeof(int))
1381 return -EINVAL;
1382
1383 if (get_user(val, (int __user *)optval))
1384 return -EINVAL;
1385
1386 valbool = val ? 1 : 0;
1387
1388 switch (optname) {
1389 case KCM_RECV_DISABLE:
1390 lock_sock(&kcm->sk);
1391 if (valbool)
1392 kcm_recv_disable(kcm);
1393 else
1394 kcm_recv_enable(kcm);
1395 release_sock(&kcm->sk);
1396 break;
1397 default:
1398 err = -ENOPROTOOPT;
1399 }
1400
1401 return err;
1402}
1403
1404static int kcm_getsockopt(struct socket *sock, int level, int optname,
1405 char __user *optval, int __user *optlen)
1406{
1407 struct kcm_sock *kcm = kcm_sk(sock->sk);
1408 int val, len;
1409
1410 if (level != SOL_KCM)
1411 return -ENOPROTOOPT;
1412
1413 if (get_user(len, optlen))
1414 return -EFAULT;
1415
1416 len = min_t(unsigned int, len, sizeof(int));
1417 if (len < 0)
1418 return -EINVAL;
1419
1420 switch (optname) {
1421 case KCM_RECV_DISABLE:
1422 val = kcm->rx_disabled;
1423 break;
1424 default:
1425 return -ENOPROTOOPT;
1426 }
1427
1428 if (put_user(len, optlen))
1429 return -EFAULT;
1430 if (copy_to_user(optval, &val, len))
1431 return -EFAULT;
1432 return 0;
1433}
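/* Illustrative note (not part of the original file): KCM_RECV_DISABLE is the
 * only SOL_KCM option handled above. A sketch of turning receive off for one
 * KCM socket from userspace, assuming "fd" is an AF_KCM socket:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
 */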
1434
1435static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1436{
1437 struct kcm_sock *tkcm;
1438 struct list_head *head;
1439 int index = 0;
1440
1441 /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1442 * we set sk_state, otherwise epoll_wait always returns right away with
1443 * POLLHUP
1444 */
1445 kcm->sk.sk_state = TCP_ESTABLISHED;
1446
1447 /* Add to mux's kcm sockets list */
1448 kcm->mux = mux;
1449 spin_lock_bh(&mux->lock);
1450
1451 head = &mux->kcm_socks;
1452 list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1453 if (tkcm->index != index)
1454 break;
1455 head = &tkcm->kcm_sock_list;
1456 index++;
1457 }
1458
1459 list_add(&kcm->kcm_sock_list, head);
1460 kcm->index = index;
1461
1462 mux->kcm_socks_cnt++;
1463 spin_unlock_bh(&mux->lock);
1464
1465 INIT_WORK(&kcm->tx_work, kcm_tx_work);
1466
1467 spin_lock_bh(&mux->rx_lock);
1468 kcm_rcv_ready(kcm);
1469 spin_unlock_bh(&mux->rx_lock);
1470}
1471
1472static int kcm_attach(struct socket *sock, struct socket *csock,
1473 struct bpf_prog *prog)
1474{
1475 struct kcm_sock *kcm = kcm_sk(sock->sk);
1476 struct kcm_mux *mux = kcm->mux;
1477 struct sock *csk;
1478 struct kcm_psock *psock = NULL, *tpsock;
1479 struct list_head *head;
1480 int index = 0;
1481
1482 if (csock->ops->family != PF_INET &&
1483 csock->ops->family != PF_INET6)
1484 return -EINVAL;
1485
1486 csk = csock->sk;
1487 if (!csk)
1488 return -EINVAL;
1489
1490 /* Only support TCP for now */
1491 if (csk->sk_protocol != IPPROTO_TCP)
1492 return -EINVAL;
1493
1494 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1495 if (!psock)
1496 return -ENOMEM;
1497
1498 psock->mux = mux;
1499 psock->sk = csk;
1500 psock->bpf_prog = prog;
1501 INIT_WORK(&psock->rx_work, psock_rx_work);
1502 INIT_DELAYED_WORK(&psock->rx_delayed_work, psock_rx_delayed_work);
1503
1504 sock_hold(csk);
1505
1506 write_lock_bh(&csk->sk_callback_lock);
1507 psock->save_data_ready = csk->sk_data_ready;
1508 psock->save_write_space = csk->sk_write_space;
1509 psock->save_state_change = csk->sk_state_change;
1510 csk->sk_user_data = psock;
1511 csk->sk_data_ready = psock_tcp_data_ready;
1512 csk->sk_write_space = psock_tcp_write_space;
1513 csk->sk_state_change = psock_tcp_state_change;
1514 write_unlock_bh(&csk->sk_callback_lock);
1515
1516 /* Finished initialization, now add the psock to the MUX. */
1517 spin_lock_bh(&mux->lock);
1518 head = &mux->psocks;
1519 list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1520 if (tpsock->index != index)
1521 break;
1522 head = &tpsock->psock_list;
1523 index++;
1524 }
1525
1526 list_add(&psock->psock_list, head);
1527 psock->index = index;
1528
1529 KCM_STATS_INCR(mux->stats.psock_attach);
1530 mux->psocks_cnt++;
1531 psock_now_avail(psock);
1532 spin_unlock_bh(&mux->lock);
1533
1534 /* Schedule RX work in case there are already bytes queued */
1535 queue_work(kcm_wq, &psock->rx_work);
1536
1537 return 0;
1538}
1539
1540static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1541{
1542 struct socket *csock;
1543 struct bpf_prog *prog;
1544 int err;
1545
1546 csock = sockfd_lookup(info->fd, &err);
1547 if (!csock)
1548 return -ENOENT;
1549
1550 prog = bpf_prog_get(info->bpf_fd);
1551 if (IS_ERR(prog)) {
1552 err = PTR_ERR(prog);
1553 goto out;
1554 }
1555
1556 if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
1557 bpf_prog_put(prog);
1558 err = -EINVAL;
1559 goto out;
1560 }
1561
1562 err = kcm_attach(sock, csock, prog);
1563 if (err) {
1564 bpf_prog_put(prog);
1565 goto out;
1566 }
1567
1568 /* Keep reference on file also */
1569
1570 return 0;
1571out:
1572 fput(csock->file);
1573 return err;
1574}
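/* Illustrative note (not part of the original file): kcm_attach_ioctl() is
 * reached via the SIOCKCMATTACH ioctl with a struct kcm_attach naming a
 * connected TCP socket and a loaded BPF_PROG_TYPE_SOCKET_FILTER program.
 * A sketch, assuming "kcmfd", "tcpfd" and "bpf_prog_fd" already exist:
 *
 *	struct kcm_attach attach = {
 *		.fd = tcpfd,		// connected TCP socket to attach
 *		.bpf_fd = bpf_prog_fd,	// message length parser program
 *	};
 *
 *	ioctl(kcmfd, SIOCKCMATTACH, &attach);
 */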
1575
1576static void kcm_unattach(struct kcm_psock *psock)
1577{
1578 struct sock *csk = psock->sk;
1579 struct kcm_mux *mux = psock->mux;
1580
1581 /* Stop getting callbacks from TCP socket. After this there should
1582 * be no way to reserve a kcm for this psock.
1583 */
1584 write_lock_bh(&csk->sk_callback_lock);
1585 csk->sk_user_data = NULL;
1586 csk->sk_data_ready = psock->save_data_ready;
1587 csk->sk_write_space = psock->save_write_space;
1588 csk->sk_state_change = psock->save_state_change;
1589 psock->rx_stopped = 1;
1590
1591 if (WARN_ON(psock->rx_kcm)) {
1592 write_unlock_bh(&csk->sk_callback_lock);
1593 return;
1594 }
1595
1596 spin_lock_bh(&mux->rx_lock);
1597
1598 /* Stop receiver activities. After this point psock should not be
1599 * able to get onto ready list either through callbacks or work.
1600 */
1601 if (psock->ready_rx_msg) {
1602 list_del(&psock->psock_ready_list);
1603 kfree_skb(psock->ready_rx_msg);
1604 psock->ready_rx_msg = NULL;
1605 KCM_STATS_INCR(mux->stats.rx_ready_drops);
1606 }
1607
1608 spin_unlock_bh(&mux->rx_lock);
1609
1610 write_unlock_bh(&csk->sk_callback_lock);
1611
1612 cancel_work_sync(&psock->rx_work);
1613 cancel_delayed_work_sync(&psock->rx_delayed_work);
1614
1615 bpf_prog_put(psock->bpf_prog);
1616
1617 kfree_skb(psock->rx_skb_head);
1618 psock->rx_skb_head = NULL;
1619
1620 spin_lock_bh(&mux->lock);
1621
1622 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1623
1624 KCM_STATS_INCR(mux->stats.psock_unattach);
1625
1626 if (psock->tx_kcm) {
1627 /* psock was reserved. Just mark it finished and we will clean
1628 * up in the kcm paths, we need kcm lock which can not be
1629 * acquired here.
1630 */
1631 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1632 spin_unlock_bh(&mux->lock);
1633
1634 /* We are unattaching a socket that is reserved. Abort the
1635 * socket since we may be out of sync in sending on it. We need
1636 * to do this without the mux lock.
1637 */
1638 kcm_abort_tx_psock(psock, EPIPE, false);
1639
1640 spin_lock_bh(&mux->lock);
1641 if (!psock->tx_kcm) {
1642 /* psock was unreserved in the window where the mux was unlocked */
1643 goto no_reserved;
1644 }
1645 psock->done = 1;
1646
1647 /* Commit done before queuing work to process it */
1648 smp_mb();
1649
1650 /* Queue tx work to make sure psock->done is handled */
1651 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1652 spin_unlock_bh(&mux->lock);
1653 } else {
1654no_reserved:
1655 if (!psock->tx_stopped)
1656 list_del(&psock->psock_avail_list);
1657 list_del(&psock->psock_list);
1658 mux->psocks_cnt--;
1659 spin_unlock_bh(&mux->lock);
1660
1661 sock_put(csk);
1662 fput(csk->sk_socket->file);
1663 kmem_cache_free(kcm_psockp, psock);
1664 }
1665}
1666
1667static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1668{
1669 struct kcm_sock *kcm = kcm_sk(sock->sk);
1670 struct kcm_mux *mux = kcm->mux;
1671 struct kcm_psock *psock;
1672 struct socket *csock;
1673 struct sock *csk;
1674 int err;
1675
1676 csock = sockfd_lookup(info->fd, &err);
1677 if (!csock)
1678 return -ENOENT;
1679
1680 csk = csock->sk;
1681 if (!csk) {
1682 err = -EINVAL;
1683 goto out;
1684 }
1685
1686 err = -ENOENT;
1687
1688 spin_lock_bh(&mux->lock);
1689
1690 list_for_each_entry(psock, &mux->psocks, psock_list) {
1691 if (psock->sk != csk)
1692 continue;
1693
1694 /* Found the matching psock */
1695
1696 if (psock->unattaching || WARN_ON(psock->done)) {
1697 err = -EALREADY;
1698 break;
1699 }
1700
1701 psock->unattaching = 1;
1702
1703 spin_unlock_bh(&mux->lock);
1704
1705 kcm_unattach(psock);
1706
1707 err = 0;
1708 goto out;
1709 }
1710
1711 spin_unlock_bh(&mux->lock);
1712
1713out:
1714 fput(csock->file);
1715 return err;
1716}
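/* Illustrative note (not part of the original file): kcm_unattach_ioctl() is
 * reached via the SIOCKCMUNATTACH ioctl and detaches a previously attached
 * TCP socket from the mux. A sketch, assuming "kcmfd" and "tcpfd" are the
 * same descriptors used for SIOCKCMATTACH above:
 *
 *	struct kcm_unattach unattach = { .fd = tcpfd };
 *
 *	ioctl(kcmfd, SIOCKCMUNATTACH, &unattach);
 */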
1717
1718static struct proto kcm_proto = {
1719 .name = "KCM",
1720 .owner = THIS_MODULE,
1721 .obj_size = sizeof(struct kcm_sock),
1722};
1723
1724/* Clone a kcm socket. */
1725static int kcm_clone(struct socket *osock, struct kcm_clone *info,
1726 struct socket **newsockp)
1727{
1728 struct socket *newsock;
1729 struct sock *newsk;
1730 struct file *newfile;
1731 int err, newfd;
1732
1733 err = -ENFILE;
1734 newsock = sock_alloc();
1735 if (!newsock)
1736 goto out;
1737
1738 newsock->type = osock->type;
1739 newsock->ops = osock->ops;
1740
1741 __module_get(newsock->ops->owner);
1742
1743 newfd = get_unused_fd_flags(0);
1744 if (unlikely(newfd < 0)) {
1745 err = newfd;
1746 goto out_fd_fail;
1747 }
1748
1749 newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1750 if (unlikely(IS_ERR(newfile))) {
1751 err = PTR_ERR(newfile);
1752 goto out_sock_alloc_fail;
1753 }
1754
1755 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1756 &kcm_proto, true);
1757 if (!newsk) {
1758 err = -ENOMEM;
1759 goto out_sk_alloc_fail;
1760 }
1761
1762 sock_init_data(newsock, newsk);
1763 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1764
1765 fd_install(newfd, newfile);
1766 *newsockp = newsock;
1767 info->fd = newfd;
1768
1769 return 0;
1770
1771out_sk_alloc_fail:
1772 fput(newfile);
1773out_sock_alloc_fail:
1774 put_unused_fd(newfd);
1775out_fd_fail:
1776 sock_release(newsock);
1777out:
1778 return err;
1779}
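/* Illustrative note (not part of the original file): kcm_clone() is reached
 * via the SIOCKCMCLONE ioctl and yields another KCM socket on the same mux,
 * e.g. one per thread. A sketch, assuming "kcmfd" is an existing AF_KCM
 * socket:
 *
 *	struct kcm_clone clone = {0};
 *
 *	ioctl(kcmfd, SIOCKCMCLONE, &clone);
 *	// clone.fd now refers to a new KCM socket on the same mux
 */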
1780
1781static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1782{
1783 int err;
1784
1785 switch (cmd) {
1786 case SIOCKCMATTACH: {
1787 struct kcm_attach info;
1788
1789 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1790 err = -EFAULT;
1791
1792 err = kcm_attach_ioctl(sock, &info);
1793
1794 break;
1795 }
1796 case SIOCKCMUNATTACH: {
1797 struct kcm_unattach info;
1798
1799 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1800 err = -EFAULT;
1801
1802 err = kcm_unattach_ioctl(sock, &info);
1803
1804 break;
1805 }
1806 case SIOCKCMCLONE: {
1807 struct kcm_clone info;
1808 struct socket *newsock = NULL;
1809
1810 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1811 err = -EFAULT;
1812
1813 err = kcm_clone(sock, &info, &newsock);
1814
1815 if (!err) {
1816 if (copy_to_user((void __user *)arg, &info,
1817 sizeof(info))) {
1818 err = -EFAULT;
1819 sock_release(newsock);
1820 }
1821 }
1822
1823 break;
1824 }
1825 default:
1826 err = -ENOIOCTLCMD;
1827 break;
1828 }
1829
1830 return err;
1831}
1832
1833static void free_mux(struct rcu_head *rcu)
1834{
1835 struct kcm_mux *mux = container_of(rcu,
1836 struct kcm_mux, rcu);
1837
1838 kmem_cache_free(kcm_muxp, mux);
1839}
1840
1841static void release_mux(struct kcm_mux *mux)
1842{
1843 struct kcm_net *knet = mux->knet;
1844 struct kcm_psock *psock, *tmp_psock;
1845
1846 /* Release psocks */
1847 list_for_each_entry_safe(psock, tmp_psock,
1848 &mux->psocks, psock_list) {
1849 if (!WARN_ON(psock->unattaching))
1850 kcm_unattach(psock);
1851 }
1852
1853 if (WARN_ON(mux->psocks_cnt))
1854 return;
1855
1856 __skb_queue_purge(&mux->rx_hold_queue);
1857
1858 mutex_lock(&knet->mutex);
1859 aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1860 aggregate_psock_stats(&mux->aggregate_psock_stats,
1861 &knet->aggregate_psock_stats);
1862 list_del_rcu(&mux->kcm_mux_list);
1863 knet->count--;
1864 mutex_unlock(&knet->mutex);
1865
1866 call_rcu(&mux->rcu, free_mux);
1867}
1868
1869static void kcm_done(struct kcm_sock *kcm)
1870{
1871 struct kcm_mux *mux = kcm->mux;
1872 struct sock *sk = &kcm->sk;
1873 int socks_cnt;
1874
1875 spin_lock_bh(&mux->rx_lock);
1876 if (kcm->rx_psock) {
1877 /* Cleanup in unreserve_rx_kcm */
1878 WARN_ON(kcm->done);
1879 kcm->rx_disabled = 1;
1880 kcm->done = 1;
1881 spin_unlock_bh(&mux->rx_lock);
1882 return;
1883 }
1884
1885 if (kcm->rx_wait) {
1886 list_del(&kcm->wait_rx_list);
1887 kcm->rx_wait = false;
1888 }
1889 /* Move any pending receive messages to other kcm sockets */
1890 requeue_rx_msgs(mux, &sk->sk_receive_queue);
1891
1892 spin_unlock_bh(&mux->rx_lock);
1893
1894 if (WARN_ON(sk_rmem_alloc_get(sk)))
1895 return;
1896
1897 /* Detach from MUX */
1898 spin_lock_bh(&mux->lock);
1899
1900 list_del(&kcm->kcm_sock_list);
1901 mux->kcm_socks_cnt--;
1902 socks_cnt = mux->kcm_socks_cnt;
1903
1904 spin_unlock_bh(&mux->lock);
1905
1906 if (!socks_cnt) {
1907 /* We are done with the mux now. */
1908 release_mux(mux);
1909 }
1910
1911 WARN_ON(kcm->rx_wait);
1912
1913 sock_put(&kcm->sk);
1914}
1915
1916/* Called by kcm_release to close a KCM socket.
1917 * If this is the last KCM socket on the MUX, destroy the MUX.
1918 */
1919static int kcm_release(struct socket *sock)
1920{
1921 struct sock *sk = sock->sk;
1922 struct kcm_sock *kcm;
1923 struct kcm_mux *mux;
1924 struct kcm_psock *psock;
1925
1926 if (!sk)
1927 return 0;
1928
1929 kcm = kcm_sk(sk);
1930 mux = kcm->mux;
1931
1932 sock_orphan(sk);
1933 kfree_skb(kcm->seq_skb);
1934
1935 lock_sock(sk);
1936 /* Purge queue under lock to avoid race condition with tx_work trying
1937 * to act when queue is nonempty. If tx_work runs after this point
1938 * it will just return.
1939 */
1940 __skb_queue_purge(&sk->sk_write_queue);
1941 release_sock(sk);
1942
1943 spin_lock_bh(&mux->lock);
1944 if (kcm->tx_wait) {
1945 /* Take off tx_wait list, after this point there should be no way
1946 * that a psock will be assigned to this kcm.
1947 */
1948 list_del(&kcm->wait_psock_list);
1949 kcm->tx_wait = false;
1950 }
1951 spin_unlock_bh(&mux->lock);
1952
1953 /* Cancel work. After this point there should be no outside references
1954 * to the kcm socket.
1955 */
1956 cancel_work_sync(&kcm->tx_work);
1957
1958 lock_sock(sk);
1959 psock = kcm->tx_psock;
1960 if (psock) {
1961 /* A psock was reserved, so we need to kill it since it
1962 * may already have some bytes queued from a message. We
1963 * need to do this after removing kcm from tx_wait list.
1964 */
1965 kcm_abort_tx_psock(psock, EPIPE, false);
1966 unreserve_psock(kcm);
1967 }
1968 release_sock(sk);
1969
1970 WARN_ON(kcm->tx_wait);
1971 WARN_ON(kcm->tx_psock);
1972
1973 sock->sk = NULL;
1974
1975 kcm_done(kcm);
1976
1977 return 0;
1978}
1979
1980static const struct proto_ops kcm_dgram_ops = {
1981 .family = PF_KCM,
1982 .owner = THIS_MODULE,
1983 .release = kcm_release,
1984 .bind = sock_no_bind,
1985 .connect = sock_no_connect,
1986 .socketpair = sock_no_socketpair,
1987 .accept = sock_no_accept,
1988 .getname = sock_no_getname,
1989 .poll = datagram_poll,
1990 .ioctl = kcm_ioctl,
1991 .listen = sock_no_listen,
1992 .shutdown = sock_no_shutdown,
1993 .setsockopt = kcm_setsockopt,
1994 .getsockopt = kcm_getsockopt,
1995 .sendmsg = kcm_sendmsg,
1996 .recvmsg = kcm_recvmsg,
1997 .mmap = sock_no_mmap,
1998 .sendpage = sock_no_sendpage,
1999};
2000
2001static const struct proto_ops kcm_seqpacket_ops = {
2002 .family = PF_KCM,
2003 .owner = THIS_MODULE,
2004 .release = kcm_release,
2005 .bind = sock_no_bind,
2006 .connect = sock_no_connect,
2007 .socketpair = sock_no_socketpair,
2008 .accept = sock_no_accept,
2009 .getname = sock_no_getname,
2010 .poll = datagram_poll,
2011 .ioctl = kcm_ioctl,
2012 .listen = sock_no_listen,
2013 .shutdown = sock_no_shutdown,
2014 .setsockopt = kcm_setsockopt,
2015 .getsockopt = kcm_getsockopt,
2016 .sendmsg = kcm_sendmsg,
2017 .recvmsg = kcm_recvmsg,
2018 .mmap = sock_no_mmap,
2019 .sendpage = sock_no_sendpage,
2020 .splice_read = kcm_splice_read,
2021};
2022
2023/* Create proto operation for kcm sockets */
2024static int kcm_create(struct net *net, struct socket *sock,
2025 int protocol, int kern)
2026{
2027 struct kcm_net *knet = net_generic(net, kcm_net_id);
2028 struct sock *sk;
2029 struct kcm_mux *mux;
2030
2031 switch (sock->type) {
2032 case SOCK_DGRAM:
2033 sock->ops = &kcm_dgram_ops;
2034 break;
2035 case SOCK_SEQPACKET:
2036 sock->ops = &kcm_seqpacket_ops;
2037 break;
2038 default:
2039 return -ESOCKTNOSUPPORT;
2040 }
2041
2042 if (protocol != KCMPROTO_CONNECTED)
2043 return -EPROTONOSUPPORT;
2044
2045 sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
2046 if (!sk)
2047 return -ENOMEM;
2048
2049 /* Allocate a kcm mux, shared between KCM sockets */
2050 mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
2051 if (!mux) {
2052 sk_free(sk);
2053 return -ENOMEM;
2054 }
2055
2056 spin_lock_init(&mux->lock);
2057 spin_lock_init(&mux->rx_lock);
2058 INIT_LIST_HEAD(&mux->kcm_socks);
2059 INIT_LIST_HEAD(&mux->kcm_rx_waiters);
2060 INIT_LIST_HEAD(&mux->kcm_tx_waiters);
2061
2062 INIT_LIST_HEAD(&mux->psocks);
2063 INIT_LIST_HEAD(&mux->psocks_ready);
2064 INIT_LIST_HEAD(&mux->psocks_avail);
2065
2066 mux->knet = knet;
2067
2068 /* Add new MUX to list */
2069 mutex_lock(&knet->mutex);
2070 list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
2071 knet->count++;
2072 mutex_unlock(&knet->mutex);
2073
2074 skb_queue_head_init(&mux->rx_hold_queue);
2075
2076 /* Init KCM socket */
2077 sock_init_data(sock, sk);
2078 init_kcm_sock(kcm_sk(sk), mux);
2079
2080 return 0;
2081}
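/* Illustrative note (not part of the original file): kcm_create() runs for a
 * plain socket() call and allocates a fresh mux each time. A sketch of the
 * two supported socket types:
 *
 *	int seqfd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
 *	int dgfd  = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
 *
 * Additional sockets on an existing mux are obtained with SIOCKCMCLONE
 * rather than with another socket() call.
 */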
2082
2083static struct net_proto_family kcm_family_ops = {
2084 .family = PF_KCM,
2085 .create = kcm_create,
2086 .owner = THIS_MODULE,
2087};
2088
2089static __net_init int kcm_init_net(struct net *net)
2090{
2091 struct kcm_net *knet = net_generic(net, kcm_net_id);
2092
2093 INIT_LIST_HEAD_RCU(&knet->mux_list);
2094 mutex_init(&knet->mutex);
2095
2096 return 0;
2097}
2098
2099static __net_exit void kcm_exit_net(struct net *net)
2100{
2101 struct kcm_net *knet = net_generic(net, kcm_net_id);
2102
2103 /* All KCM sockets should be closed at this point, which should mean
2104 * that all multiplexors and psocks have been destroyed.
2105 */
2106 WARN_ON(!list_empty(&knet->mux_list));
2107}
2108
2109static struct pernet_operations kcm_net_ops = {
2110 .init = kcm_init_net,
2111 .exit = kcm_exit_net,
2112 .id = &kcm_net_id,
2113 .size = sizeof(struct kcm_net),
2114};
2115
2116static int __init kcm_init(void)
2117{
2118 int err = -ENOMEM;
2119
2120 kcm_muxp = kmem_cache_create("kcm_mux_cache",
2121 sizeof(struct kcm_mux), 0,
2122 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2123 if (!kcm_muxp)
2124 goto fail;
2125
2126 kcm_psockp = kmem_cache_create("kcm_psock_cache",
2127 sizeof(struct kcm_psock), 0,
2128 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2129 if (!kcm_psockp)
2130 goto fail;
2131
2132 kcm_wq = create_singlethread_workqueue("kkcmd");
2133 if (!kcm_wq)
2134 goto fail;
2135
2136 err = proto_register(&kcm_proto, 1);
2137 if (err)
2138 goto fail;
2139
2140 err = sock_register(&kcm_family_ops);
2141 if (err)
2142 goto sock_register_fail;
2143
2144 err = register_pernet_device(&kcm_net_ops);
2145 if (err)
2146 goto net_ops_fail;
2147
2148 err = kcm_proc_init();
2149 if (err)
2150 goto proc_init_fail;
2151
2152 return 0;
2153
2154proc_init_fail:
2155 unregister_pernet_device(&kcm_net_ops);
2156
2157net_ops_fail:
2158 sock_unregister(PF_KCM);
2159
2160sock_register_fail:
2161 proto_unregister(&kcm_proto);
2162
2163fail:
2164 kmem_cache_destroy(kcm_muxp);
2165 kmem_cache_destroy(kcm_psockp);
2166
2167 if (kcm_wq)
2168 destroy_workqueue(kcm_wq);
2169
2170 return err;
2171}
2172
2173static void __exit kcm_exit(void)
2174{
2175 kcm_proc_exit();
2176 unregister_pernet_device(&kcm_net_ops);
2177 sock_unregister(PF_KCM);
2178 proto_unregister(&kcm_proto);
2179 destroy_workqueue(kcm_wq);
2180
2181 kmem_cache_destroy(kcm_muxp);
2182 kmem_cache_destroy(kcm_psockp);
2183}
2184
2185module_init(kcm_init);
2186module_exit(kcm_exit);
2187
2188MODULE_LICENSE("GPL");
2189MODULE_ALIAS_NETPROTO(PF_KCM);
2190