/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>
unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;
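
/* A quick map of the objects in this file: a KCM socket is the socket an
 * application reads and writes messages on; a mux groups KCM sockets
 * together and is shared by them; a psock wraps a lower (TCP) socket that
 * has been attached to the mux. Messages parsed on any psock can be
 * delivered to any KCM socket on the mux, and any KCM socket can transmit
 * on any psock.
 */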
static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
	return (struct kcm_sock *)sk;
}

static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
	return (struct kcm_tx_msg *)skb->cb;
}
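
/* Report a fatal error on a socket: mark it and run its error-report
 * callback so that any poller wakes up and sees the failure.
 */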
static void report_csk_error(struct sock *csk, int err)
{
	csk->sk_err = EPIPE;
	csk->sk_error_report(csk);
}
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}
/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	STRP_STATS_ADD(mux->stats.rx_bytes,
		       psock->strp.stats.rx_bytes -
		       psock->saved_rx_bytes);
	mux->stats.rx_msgs +=
		psock->strp.stats.rx_msgs - psock->saved_rx_msgs;
	psock->saved_rx_msgs = psock->strp.stats.rx_msgs;
	psock->saved_rx_bytes = psock->strp.stats.rx_bytes;
}
static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	KCM_STATS_ADD(mux->stats.tx_bytes,
		      psock->stats.tx_bytes - psock->saved_tx_bytes);
	mux->stats.tx_msgs +=
		psock->stats.tx_msgs - psock->saved_tx_msgs;
	psock->saved_tx_msgs = psock->stats.tx_msgs;
	psock->saved_tx_bytes = psock->stats.tx_bytes;
}
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
/* KCM is ready to receive messages on its queue -- either the KCM is new or
 * has become unblocked after being blocked on full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct sk_buff *skb;

	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
		return;

	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Assuming buffer limit has been reached */
			skb_queue_head(&mux->rx_hold_queue, skb);
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}
	}

	while (!list_empty(&mux->psocks_ready)) {
		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
					 psock_ready_list);

		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
			/* Assuming buffer limit has been reached */
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}

		/* Consumed the ready message on the psock. Schedule rx_work to
		 * get more messages.
		 */
		list_del(&psock->psock_ready_list);
		psock->ready_rx_msg = NULL;
		/* Commit clearing of ready_rx_msg for queuing work */
		smp_mb();

		strp_unpause(&psock->strp);
		strp_check_rcv(&psock->strp);
	}

	/* Buffer limit is okay now, add to ready list */
	list_add_tail(&kcm->wait_rx_list,
		      &kcm->mux->kcm_rx_waiters);
	kcm->rx_wait = true;
}
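
/* Receive-side skb destructor. Besides uncharging receive memory, this is
 * the hook that re-enables reception for a KCM socket once its receive
 * buffer has drained below sk_rcvlowat.
 */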
static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct kcm_mux *mux = kcm->mux;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);

	/* For reading rx_wait and rx_psock without holding lock */
	smp_mb__after_atomic();

	if (!kcm->rx_wait && !kcm->rx_psock &&
	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
		spin_lock_bh(&mux->rx_lock);
		kcm_rcv_ready(kcm);
		spin_unlock_bh(&mux->rx_lock);
	}
}
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}
/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 *
 * Called with lock held on kcm socket and mux rx_lock.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = __skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			kcm->rx_wait = false;

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			goto try_again;
		}
	}
}
/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
				       struct sk_buff *head)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	WARN_ON(psock->ready_rx_msg);

	if (psock->rx_kcm)
		return psock->rx_kcm;

	spin_lock_bh(&mux->rx_lock);

	if (psock->rx_kcm) {
		spin_unlock_bh(&mux->rx_lock);
		return psock->rx_kcm;
	}

	kcm_update_rx_mux_stats(mux, psock);

	if (list_empty(&mux->kcm_rx_waiters)) {
		psock->ready_rx_msg = head;
		strp_pause(&psock->strp);
		list_add_tail(&psock->psock_ready_list,
			      &mux->psocks_ready);
		spin_unlock_bh(&mux->rx_lock);
		return NULL;
	}

	kcm = list_first_entry(&mux->kcm_rx_waiters,
			       struct kcm_sock, wait_rx_list);
	list_del(&kcm->wait_rx_list);
	kcm->rx_wait = false;

	psock->rx_kcm = kcm;
	kcm->rx_psock = psock;

	spin_unlock_bh(&mux->rx_lock);

	return kcm;
}
static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
	kcm_done(container_of(w, struct kcm_sock, done_work));
}
/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
			     bool rcv_ready)
{
	struct kcm_sock *kcm = psock->rx_kcm;
	struct kcm_mux *mux = psock->mux;

	if (!kcm)
		return;

	spin_lock_bh(&mux->rx_lock);

	psock->rx_kcm = NULL;
	kcm->rx_psock = NULL;

	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
	 * kcm_rfree
	 */
	smp_mb();

	if (unlikely(kcm->done)) {
		spin_unlock_bh(&mux->rx_lock);

		/* Need to run kcm_done in a task since we need to acquire
		 * callback locks which may already be held here.
		 */
		INIT_WORK(&kcm->done_work, kcm_done_work);
		schedule_work(&kcm->done_work);
		return;
	}

	if (unlikely(kcm->rx_disabled)) {
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
		/* Check for degenerative race with rx_wait that all
		 * data was dequeued (accounted for in kcm_rfree).
		 */
		kcm_rcv_ready(kcm);
	}

	spin_unlock_bh(&mux->rx_lock);
}
/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
	struct kcm_psock *psock;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (likely(psock))
		strp_data_ready(&psock->strp);

	read_unlock_bh(&sk->sk_callback_lock);
}
/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct kcm_sock *kcm;

try_queue:
	kcm = reserve_rx_kcm(psock, skb);
	if (!kcm) {
		/* Unable to reserve a KCM, message is held in psock and strp
		 * is paused.
		 */
		return;
	}

	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
		/* Should mean socket buffer full */
		unreserve_rx_kcm(psock, false);
		goto try_queue;
	}
}
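
/* The parse callback hands each candidate skb to the attached BPF program,
 * which must return the full length of the message so the strparser knows
 * where the next one begins (or an error to abort the stream). A minimal
 * sketch of such a program, assuming a framing with a 2-byte big-endian
 * length header, along the lines of the example in
 * Documentation/networking/kcm.txt:
 *
 *	int bpf_prog1(struct __sk_buff *skb)
 *	{
 *		return load_half(skb, 0) + 2;
 *	}
 */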
static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct bpf_prog *prog = psock->bpf_prog;

	return (*prog->bpf_func)(skb, prog->insnsi);
}
static int kcm_read_sock_done(struct strparser *strp, int err)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

	unreserve_rx_kcm(psock, true);

	return err;
}
static void psock_state_change(struct sock *sk)
{
	/* TCP only does a POLLIN for a half close. Do a POLLHUP here
	 * since application will normally not poll with POLLIN
	 * on the TCP sockets.
	 */

	report_csk_error(sk, EPIPE);
}
static void psock_write_space(struct sock *sk)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux;
	struct kcm_sock *kcm;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (unlikely(!psock))
		goto out;
	mux = psock->mux;

	spin_lock_bh(&mux->lock);

	/* Check if the socket is reserved: if so, a KCM socket is waiting
	 * to send on it.
	 */
	kcm = psock->tx_kcm;
	if (kcm && !unlikely(kcm->tx_stopped))
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
static void unreserve_psock(struct kcm_sock *kcm);
/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if a psock was reserved for this
	 * kcm in the meantime (via psock_now_avail).
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}
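
/* mux lock held: hand a psock that has just become available directly to
 * the first waiting KCM socket, or else park it on the psocks_avail list.
 */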
static void psock_now_avail(struct kcm_psock *psock)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	if (list_empty(&mux->kcm_tx_waiters)) {
		list_add_tail(&psock->psock_avail_list,
			      &mux->psocks_avail);
	} else {
		kcm = list_first_entry(&mux->kcm_tx_waiters,
				       struct kcm_sock,
				       wait_psock_list);
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
		psock->tx_kcm = kcm;

		/* Commit before changing tx_psock since that is read in
		 * reserve_psock before queuing work.
		 */
		smp_mb();

		kcm->tx_psock = psock;
		KCM_STATS_INCR(psock->stats.reserved);
		queue_work(kcm_wq, &kcm->tx_work);
	}
}
/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);

	psock = kcm->tx_psock;

	if (WARN_ON(!psock)) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	smp_rmb(); /* Read tx_psock before tx_wait */

	kcm_update_tx_mux_stats(mux, psock);

	WARN_ON(kcm->tx_wait);

	kcm->tx_psock = NULL;
	psock->tx_kcm = NULL;
	KCM_STATS_INCR(psock->stats.unreserved);

	if (unlikely(psock->tx_stopped)) {
		if (psock->done) {
			/* Deferred free */
			list_del(&psock->psock_list);
			mux->psocks_cnt--;
			sock_put(psock->sk);
			fput(psock->sk->sk_socket->file);
			kmem_cache_free(kcm_psockp, psock);
		}

		/* Don't put back on available list */

		spin_unlock_bh(&mux->lock);

		return;
	}

	psock_now_avail(psock);

	spin_unlock_bh(&mux->lock);
}
static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);
	KCM_STATS_INCR(mux->stats.tx_retries);
	spin_unlock_bh(&mux->lock);
}
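
/* Transmit path overview: a queued message skb carries its payload in page
 * frags (and possibly a frag_list of continuation skbs), so the loop below
 * can push every fragment to the lower TCP socket with kernel_sendpage()
 * without copying. On -EAGAIN the walk position is saved in the message's
 * kcm_tx_msg control block and resumed later from tx_work once write space
 * is signaled.
 */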
/* Write any messages ready on the kcm socket. Called with kcm sock lock
 * held. Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
	struct sock *sk = &kcm->sk;
	struct kcm_psock *psock;
	struct sk_buff *skb, *head;
	struct kcm_tx_msg *txm;
	unsigned short fragidx, frag_offset;
	unsigned int sent, total_sent = 0;
	int ret = 0;

	kcm->tx_wait_more = false;
	psock = kcm->tx_psock;
	if (unlikely(psock && psock->tx_stopped)) {
		/* A reserved psock was aborted asynchronously. Unreserve
		 * it and we'll retry the message.
		 */
		unreserve_psock(kcm);
		kcm_report_tx_retry(kcm);
		if (skb_queue_empty(&sk->sk_write_queue))
			return 0;

		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

	} else if (skb_queue_empty(&sk->sk_write_queue)) {
		return 0;
	}

	head = skb_peek(&sk->sk_write_queue);
	txm = kcm_tx_msg(head);

	if (txm->sent) {
		/* Send of first skbuff in queue already in progress */
		if (WARN_ON(!psock)) {
			ret = -EINVAL;
			goto out;
		}
		sent = txm->sent;
		frag_offset = txm->frag_offset;
		fragidx = txm->fragidx;
		skb = txm->frag_skb;

		goto do_frag;
	}

try_again:
	psock = reserve_psock(kcm);
	if (!psock)
		goto out;

	do {
		skb = head;
		txm = kcm_tx_msg(head);
		sent = 0;

do_frag_list:
		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
			ret = -EINVAL;
			goto out;
		}

		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
		     fragidx++) {
			skb_frag_t *frag;

			frag_offset = 0;
do_frag:
			frag = &skb_shinfo(skb)->frags[fragidx];
			if (WARN_ON(!frag->size)) {
				ret = -EINVAL;
				goto out;
			}

			ret = kernel_sendpage(psock->sk->sk_socket,
					      frag->page.p,
					      frag->page_offset + frag_offset,
					      frag->size - frag_offset,
					      MSG_DONTWAIT);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					/* Save state to try again when there's
					 * write space on the socket
					 */
					txm->sent = sent;
					txm->frag_offset = frag_offset;
					txm->fragidx = fragidx;
					txm->frag_skb = skb;

					ret = 0;
					goto out;
				}

				/* Hard failure in sending message, abort this
				 * psock since it has lost framing
				 * synchronization and retry sending the
				 * message from the beginning.
				 */
				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
						   true);
				unreserve_psock(kcm);

				txm->sent = 0;
				kcm_report_tx_retry(kcm);
				ret = 0;

				goto try_again;
			}

			sent += ret;
			frag_offset += ret;
			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
			if (frag_offset < frag->size) {
				/* Not finished with this frag */
				goto do_frag;
			}
		}

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				skb = skb_shinfo(skb)->frag_list;
				goto do_frag_list;
			}
		} else if (skb->next) {
			skb = skb->next;
			goto do_frag_list;
		}

		/* Successfully sent the whole packet, account for it. */
		skb_dequeue(&sk->sk_write_queue);
		kfree_skb(head);
		sk->sk_wmem_queued -= sent;
		total_sent += sent;
		KCM_STATS_INCR(psock->stats.tx_msgs);
	} while ((head = skb_peek(&sk->sk_write_queue)));
out:
	if (!head) {
		/* Done with all queued messages. */
		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
		unreserve_psock(kcm);
	}

	/* Check if write space is available */
	sk->sk_write_space(sk);

	return total_sent ? : ret;
}
static void kcm_tx_work(struct work_struct *w)
{
	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
	struct sock *sk = &kcm->sk;
	int err;

	lock_sock(sk);

	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
	 * aborts
	 */
	err = kcm_write_msgs(kcm);
	if (err < 0) {
		/* Hard failure in write, report error on KCM socket */
		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
		report_csk_error(&kcm->sk, -err);
		goto out;
	}

	/* Primarily for SOCK_SEQPACKET sockets */
	if (likely(sk->sk_socket) &&
	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_space(sk);
	}

out:
	release_sock(sk);
}
static void kcm_push(struct kcm_sock *kcm)
{
	if (kcm->tx_wait_more)
		kcm_write_msgs(kcm);
}
static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	int err = 0;
	int i;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & MSG_MORE);

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	err = -EPIPE;
	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		i = skb_shinfo(skb)->nr_frags;

		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
			goto coalesced;
		}

		if (i >= MAX_SKB_FRAGS) {
			struct sk_buff *tskb;

			tskb = alloc_skb(0, sk->sk_allocation);
			while (!tskb) {
				kcm_push(kcm);
				err = sk_stream_wait_memory(sk, &timeo);
				if (err)
					goto out_error;

				tskb = alloc_skb(0, sk->sk_allocation);
			}

			if (head == skb)
				skb_shinfo(head)->frag_list = tskb;
			else
				skb->next = tskb;

			skb = tskb;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			i = 0;
		}
	} else {
		/* Call the sk_stream functions to manage the sndbuf mem. */
		if (!sk_stream_memory_free(sk)) {
			kcm_push(kcm);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;
		}

		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;
		i = 0;
	}

	get_page(page);
	skb_fill_page_desc(skb, i, page, offset, size);
	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

coalesced:
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	sk->sk_wmem_queued += size;
	sk_mem_charge(sk, size);

	if (head != skb) {
		head->len += size;
		head->data_len += size;
		head->truesize += size;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		/* Message complete, queue it on send buffer */
		__skb_queue_tail(&sk->sk_write_queue, head);
		kcm->seq_skb = NULL;
		KCM_STATS_INCR(kcm->stats.tx_msgs);

		if (flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
		kcm->seq_skb = head;
		kcm_tx_msg(head)->last_skb = skb;
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, size);

	release_sock(sk);
	return size;

out_error:
	kcm_push(kcm);

	err = sk_stream_error(sk, flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
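
/* Transmit message boundaries: on a SOCK_DGRAM KCM socket a message is
 * closed by any sendmsg() without MSG_MORE; on SOCK_SEQPACKET it is closed
 * by an explicit MSG_EOR. MSG_BATCH holds completed messages back so that
 * several can be flushed to a psock together. A rough userspace sketch
 * (kcmfd is assumed to be a KCM socket descriptor, and the framing of
 * msg1/msg2 must match what the attached BPF parser expects):
 *
 *	send(kcmfd, msg1, len1, MSG_BATCH);
 *	send(kcmfd, msg2, len2, 0);	// flushes both messages
 */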
static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	size_t copy, copied = 0;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	int eor = (sock->type == SOCK_DGRAM) ?
		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
	int err = -EPIPE;

	lock_sock(sk);

	/* Per tcp_sendmsg this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		goto start;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	if (!sk_stream_memory_free(sk)) {
		kcm_push(kcm);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (msg_data_left(msg)) {
		/* New message, alloc head skb */
		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;

		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

start:
	while (msg_data_left(msg)) {
		bool merge = true;
		int i = skb_shinfo(skb)->nr_frags;
		struct page_frag *pfrag = sk_page_frag(sk);

		if (!sk_page_frag_refill(sk, pfrag))
			goto wait_for_memory;

		if (!skb_can_coalesce(skb, i, pfrag->page,
				      pfrag->offset)) {
			if (i == MAX_SKB_FRAGS) {
				struct sk_buff *tskb;

				tskb = alloc_skb(0, sk->sk_allocation);
				if (!tskb)
					goto wait_for_memory;

				if (head == skb)
					skb_shinfo(head)->frag_list = tskb;
				else
					skb->next = tskb;

				skb = tskb;
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				continue;
			}
			merge = false;
		}

		copy = min_t(int, msg_data_left(msg),
			     pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
					       pfrag->page,
					       pfrag->offset,
					       copy);
		if (err)
			goto out_error;

		/* Update the skb. */
		if (merge) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			skb_fill_page_desc(skb, i, pfrag->page,
					   pfrag->offset, copy);
			get_page(pfrag->page);
		}

		pfrag->offset += copy;
		copied += copy;
		if (head != skb) {
			head->len += copy;
			head->data_len += copy;
		}

		continue;

wait_for_memory:
		kcm_push(kcm);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		if (head) {
			/* Message complete, queue it on send buffer */
			__skb_queue_tail(&sk->sk_write_queue, head);
			kcm->seq_skb = NULL;
			KCM_STATS_INCR(kcm->stats.tx_msgs);
		}

		if (msg->msg_flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
partial_message:
		kcm->seq_skb = head;
		kcm_tx_msg(head)->last_skb = skb;
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

	release_sock(sk);
	return copied;

out_error:
	kcm_push(kcm);

	if (copied && sock->type == SOCK_SEQPACKET) {
		/* Wrote some bytes before encountering an
		 * error, return partial success.
		 */
		goto partial_message;
	}

	if (head != kcm->seq_skb)
		kfree_skb(head);

	err = sk_stream_error(sk, msg->msg_flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		sk_wait_data(sk, &timeo, NULL);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
		       size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	int err = 0;
	long timeo;
	struct strp_rx_msg *rxm;
	int copied = 0;
	struct sk_buff *skb;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	skb = kcm_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto out;

	/* Okay, have a message on the receive queue */

	rxm = strp_rx_msg(skb);

	if (len > rxm->full_len)
		len = rxm->full_len;

	err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
	if (err < 0)
		goto out;

	copied = len;
	if (likely(!(flags & MSG_PEEK))) {
		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
		if (copied < rxm->full_len) {
			if (sock->type == SOCK_DGRAM) {
				/* Truncated message */
				msg->msg_flags |= MSG_TRUNC;
				goto msg_finished;
			}
			rxm->offset += copied;
			rxm->full_len -= copied;
		} else {
msg_finished:
			/* Finished with message */
			msg->msg_flags |= MSG_EOR;
			KCM_STATS_INCR(kcm->stats.rx_msgs);
			skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);
		}
	}

out:
	release_sock(sk);

	return copied ? : err;
}
static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	long timeo;
	struct strp_rx_msg *rxm;
	int err = 0;
	ssize_t copied;
	struct sk_buff *skb;

	/* Only support splice for SOCK_SEQPACKET */

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	skb = kcm_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto err_out;

	/* Okay, have a message on the receive queue */

	rxm = strp_rx_msg(skb);

	if (len > rxm->full_len)
		len = rxm->full_len;

	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags);
	if (copied < 0) {
		err = copied;
		goto err_out;
	}

	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

	rxm->offset += copied;
	rxm->full_len -= copied;

	/* We have no way to return MSG_EOR. If all the bytes have been
	 * read we still leave the message in the receive socket buffer.
	 * A subsequent recvmsg needs to be done to return MSG_EOR and
	 * finish reading the message.
	 */

	release_sock(sk);

	return copied;

err_out:
	release_sock(sk);

	return err;
}
/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 1;

	/* If a psock is reserved we'll do cleanup in unreserve */
	if (!kcm->rx_psock) {
		if (kcm->rx_wait) {
			list_del(&kcm->wait_rx_list);
			kcm->rx_wait = false;
		}

		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	}

	spin_unlock_bh(&mux->rx_lock);
}
/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (!kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 0;
	kcm_rcv_ready(kcm);

	spin_unlock_bh(&mux->rx_lock);
}
static int kcm_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, valbool;
	int err = 0;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EINVAL;

	valbool = val ? 1 : 0;

	switch (optname) {
	case KCM_RECV_DISABLE:
		lock_sock(&kcm->sk);
		if (valbool)
			kcm_recv_disable(kcm);
		else
			kcm_recv_enable(kcm);
		release_sock(&kcm->sk);
		break;
	default:
		err = -ENOPROTOOPT;
	}

	return err;
}
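
/* Usage sketch (illustrative): disable receive on a KCM socket so that its
 * pending and future messages are diverted to other sockets on the mux:
 *
 *	int on = 1;
 *
 *	setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
 */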
static int kcm_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, len;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case KCM_RECV_DISABLE:
		val = kcm->rx_disabled;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
	struct kcm_sock *tkcm;
	struct list_head *head;
	int index = 0;

	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
	 * we set sk_state, otherwise epoll_wait always returns right away with
	 * POLLHUP
	 */
	kcm->sk.sk_state = TCP_ESTABLISHED;

	/* Add to mux's kcm sockets list */
	kcm->mux = mux;
	spin_lock_bh(&mux->lock);

	head = &mux->kcm_socks;
	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
		if (tkcm->index != index)
			break;
		head = &tkcm->kcm_sock_list;
		index++;
	}

	list_add(&kcm->kcm_sock_list, head);
	kcm->index = index;

	mux->kcm_socks_cnt++;
	spin_unlock_bh(&mux->lock);

	INIT_WORK(&kcm->tx_work, kcm_tx_work);

	spin_lock_bh(&mux->rx_lock);
	kcm_rcv_ready(kcm);
	spin_unlock_bh(&mux->rx_lock);
}
static int kcm_attach(struct socket *sock, struct socket *csock,
		      struct bpf_prog *prog)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct sock *csk;
	struct kcm_psock *psock = NULL, *tpsock;
	struct list_head *head;
	int index = 0;
	struct strp_callbacks cb;
	int err;

	csk = csock->sk;
	if (!csk)
		return -EINVAL;

	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
	if (!psock)
		return -ENOMEM;

	psock->mux = mux;
	psock->sk = csk;
	psock->bpf_prog = prog;

	cb.rcv_msg = kcm_rcv_strparser;
	cb.abort_parser = NULL;
	cb.parse_msg = kcm_parse_func_strparser;
	cb.read_sock_done = kcm_read_sock_done;

	err = strp_init(&psock->strp, csk, &cb);
	if (err) {
		kmem_cache_free(kcm_psockp, psock);
		return err;
	}

	sock_hold(csk);

	write_lock_bh(&csk->sk_callback_lock);
	psock->save_data_ready = csk->sk_data_ready;
	psock->save_write_space = csk->sk_write_space;
	psock->save_state_change = csk->sk_state_change;
	csk->sk_user_data = psock;
	csk->sk_data_ready = psock_data_ready;
	csk->sk_write_space = psock_write_space;
	csk->sk_state_change = psock_state_change;
	write_unlock_bh(&csk->sk_callback_lock);

	/* Finished initialization, now add the psock to the MUX. */
	spin_lock_bh(&mux->lock);
	head = &mux->psocks;
	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
		if (tpsock->index != index)
			break;
		head = &tpsock->psock_list;
		index++;
	}

	list_add(&psock->psock_list, head);
	psock->index = index;

	KCM_STATS_INCR(mux->stats.psock_attach);
	mux->psocks_cnt++;
	psock_now_avail(psock);
	spin_unlock_bh(&mux->lock);

	/* Schedule RX work in case there are already bytes queued */
	strp_check_rcv(&psock->strp);

	return 0;
}
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
	struct socket *csock;
	struct bpf_prog *prog;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out;
	}

	err = kcm_attach(sock, csock, prog);
	if (err) {
		bpf_prog_put(prog);
		goto out;
	}

	/* Keep reference on file also */

	return 0;
out:
	fput(csock->file);
	return err;
}
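
/* Userspace attaches a connected TCP socket plus a BPF parser program to
 * the mux with SIOCKCMATTACH, much as in the example in
 * Documentation/networking/kcm.txt (tcpfd and bpf_prog_fd are assumed to
 * already exist):
 *
 *	struct kcm_attach info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.fd = tcpfd;
 *	info.bpf_fd = bpf_prog_fd;
 *
 *	ioctl(kcmfd, SIOCKCMATTACH, &info);
 */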
static void kcm_unattach(struct kcm_psock *psock)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	lock_sock(csk);

	/* Stop getting callbacks from TCP socket. After this there should
	 * be no way to reserve a kcm for this psock.
	 */
	write_lock_bh(&csk->sk_callback_lock);
	csk->sk_user_data = NULL;
	csk->sk_data_ready = psock->save_data_ready;
	csk->sk_write_space = psock->save_write_space;
	csk->sk_state_change = psock->save_state_change;
	strp_stop(&psock->strp);

	if (WARN_ON(psock->rx_kcm)) {
		write_unlock_bh(&csk->sk_callback_lock);
		release_sock(csk);
		return;
	}
	spin_lock_bh(&mux->rx_lock);

	/* Stop receiver activities. After this point psock should not be
	 * able to get onto ready list either through callbacks or work.
	 */
	if (psock->ready_rx_msg) {
		list_del(&psock->psock_ready_list);
		kfree_skb(psock->ready_rx_msg);
		psock->ready_rx_msg = NULL;
		KCM_STATS_INCR(mux->stats.rx_ready_drops);
	}

	spin_unlock_bh(&mux->rx_lock);

	write_unlock_bh(&csk->sk_callback_lock);

	/* Call strp_done without sock lock */
	release_sock(csk);
	strp_done(&psock->strp);

	bpf_prog_put(psock->bpf_prog);

	spin_lock_bh(&mux->lock);

	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

	KCM_STATS_INCR(mux->stats.psock_unattach);

	if (psock->tx_kcm) {
		/* psock was reserved. Just mark it finished and we will clean
		 * up in the kcm paths, we need kcm lock which can not be
		 * acquired here.
		 */
		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
		spin_unlock_bh(&mux->lock);

		/* We are unattaching a socket that is reserved. Abort the
		 * socket since we may be out of sync in sending on it. We need
		 * to do this without the mux lock.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);

		spin_lock_bh(&mux->lock);
		if (!psock->tx_kcm) {
			/* psock now unreserved in window mux was unlocked */
			goto no_reserved;
		}
		psock->done = 1;

		/* Commit done before queuing work to process it */
		smp_mb();

		/* Queue tx work to make sure psock->done is handled */
		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
		spin_unlock_bh(&mux->lock);
	} else {
no_reserved:
		if (!psock->tx_stopped)
			list_del(&psock->psock_avail_list);
		list_del(&psock->psock_list);
		mux->psocks_cnt--;
		spin_unlock_bh(&mux->lock);

		sock_put(csk);
		fput(csk->sk_socket->file);
		kmem_cache_free(kcm_psockp, psock);
	}
}
static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct socket *csock;
	struct sock *csk;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	csk = csock->sk;
	if (!csk) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOENT;

	spin_lock_bh(&mux->lock);

	list_for_each_entry(psock, &mux->psocks, psock_list) {
		if (psock->sk != csk)
			continue;

		/* Found the matching psock */

		if (psock->unattaching || WARN_ON(psock->done)) {
			err = -EALREADY;
			break;
		}

		psock->unattaching = 1;

		spin_unlock_bh(&mux->lock);

		/* Lower socket lock should already be held */
		kcm_unattach(psock);

		err = 0;
		goto out;
	}

	spin_unlock_bh(&mux->lock);

out:
	fput(csock->file);
	return err;
}
static struct proto kcm_proto = {
	.name	= "KCM",
	.owner	= THIS_MODULE,
	.obj_size = sizeof(struct kcm_sock),
};
/* Clone a kcm socket. */
static int kcm_clone(struct socket *osock, struct kcm_clone *info,
		     struct socket **newsockp)
{
	struct socket *newsock;
	struct sock *newsk;
	struct file *newfile;
	int err, newfd;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out;

	newsock->type = osock->type;
	newsock->ops = osock->ops;

	__module_get(newsock->ops->owner);

	newfd = get_unused_fd_flags(0);
	if (unlikely(newfd < 0)) {
		err = newfd;
		goto out_fd_fail;
	}

	newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
	if (unlikely(IS_ERR(newfile))) {
		err = PTR_ERR(newfile);
		goto out_sock_alloc_fail;
	}

	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
			 &kcm_proto, true);
	if (!newsk) {
		err = -ENOMEM;
		goto out_sk_alloc_fail;
	}

	sock_init_data(newsock, newsk);
	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);

	fd_install(newfd, newfile);
	*newsockp = newsock;
	info->fd = newfd;

	return 0;

out_sk_alloc_fail:
	fput(newfile);
out_sock_alloc_fail:
	put_unused_fd(newfd);
out_fd_fail:
	sock_release(newsock);
out:
	return err;
}
static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err;

	switch (cmd) {
	case SIOCKCMATTACH: {
		struct kcm_attach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_attach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMUNATTACH: {
		struct kcm_unattach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_unattach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMCLONE: {
		struct kcm_clone info;
		struct socket *newsock = NULL;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_clone(sock, &info, &newsock);
		if (!err) {
			if (copy_to_user((void __user *)arg, &info,
					 sizeof(info))) {
				err = -EFAULT;
				sys_close(info.fd);
			}
		}

		break;
	}
	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
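
/* A mux is destroyed only when the last KCM socket on it is released.
 * Freeing is deferred through RCU so that lockless readers walking the
 * per-namespace mux_list (such as the proc reporting code) never see a
 * stale mux.
 */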
static void free_mux(struct rcu_head *rcu)
{
	struct kcm_mux *mux = container_of(rcu,
					   struct kcm_mux, rcu);

	kmem_cache_free(kcm_muxp, mux);
}
static void release_mux(struct kcm_mux *mux)
{
	struct kcm_net *knet = mux->knet;
	struct kcm_psock *psock, *tmp_psock;

	/* Release psocks */
	list_for_each_entry_safe(psock, tmp_psock,
				 &mux->psocks, psock_list) {
		if (!WARN_ON(psock->unattaching))
			kcm_unattach(psock);
	}

	if (WARN_ON(mux->psocks_cnt))
		return;

	__skb_queue_purge(&mux->rx_hold_queue);

	mutex_lock(&knet->mutex);
	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
	aggregate_psock_stats(&mux->aggregate_psock_stats,
			      &knet->aggregate_psock_stats);
	aggregate_strp_stats(&mux->aggregate_strp_stats,
			     &knet->aggregate_strp_stats);
	list_del_rcu(&mux->kcm_mux_list);
	knet->count--;
	mutex_unlock(&knet->mutex);

	call_rcu(&mux->rcu, free_mux);
}
static void kcm_done(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct sock *sk = &kcm->sk;
	int socks_cnt;

	spin_lock_bh(&mux->rx_lock);
	if (kcm->rx_psock) {
		/* Cleanup in unreserve_rx_kcm */
		WARN_ON(kcm->done);
		kcm->rx_disabled = 1;
		kcm->done = 1;
		spin_unlock_bh(&mux->rx_lock);
		return;
	}

	if (kcm->rx_wait) {
		list_del(&kcm->wait_rx_list);
		kcm->rx_wait = false;
	}
	/* Move any pending receive messages to other kcm sockets */
	requeue_rx_msgs(mux, &sk->sk_receive_queue);

	spin_unlock_bh(&mux->rx_lock);

	if (WARN_ON(sk_rmem_alloc_get(sk)))
		return;

	/* Detach from MUX */
	spin_lock_bh(&mux->lock);

	list_del(&kcm->kcm_sock_list);
	mux->kcm_socks_cnt--;
	socks_cnt = mux->kcm_socks_cnt;

	spin_unlock_bh(&mux->lock);

	if (!socks_cnt) {
		/* We are done with the mux now. */
		release_mux(mux);
	}

	WARN_ON(kcm->rx_wait);

	sock_put(&kcm->sk);
}
/* Called on release to close a KCM socket.
 * If this is the last KCM socket on the MUX, destroy the MUX.
 */
static int kcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm;
	struct kcm_mux *mux;
	struct kcm_psock *psock;

	if (!sk)
		return 0;

	kcm = kcm_sk(sk);
	mux = kcm->mux;

	sock_orphan(sk);
	kfree_skb(kcm->seq_skb);

	lock_sock(sk);
	/* Purge queue under lock to avoid race condition with tx_work trying
	 * to act when queue is nonempty. If tx_work runs after this point
	 * it will just return.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
	 * get a writespace callback. This prevents further work being queued
	 * from the callback (unbinding the psock occurs after canceling work).
	 */
	kcm->tx_stopped = 1;

	release_sock(sk);

	spin_lock_bh(&mux->lock);
	if (kcm->tx_wait) {
		/* Take off tx_wait list, after this point there should be no
		 * way that a psock will be assigned to this kcm.
		 */
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
	}
	spin_unlock_bh(&mux->lock);

	/* Cancel work. After this point there should be no outside references
	 * to the kcm socket.
	 */
	cancel_work_sync(&kcm->tx_work);

	lock_sock(sk);
	psock = kcm->tx_psock;
	if (psock) {
		/* A psock was reserved, so we need to kill it since it
		 * may already have some bytes queued from a message. We
		 * need to do this after removing kcm from tx_wait list.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);
		unreserve_psock(kcm);
	}
	release_sock(sk);

	WARN_ON(kcm->tx_wait);
	WARN_ON(kcm->tx_psock);

	sock->sk = NULL;

	kcm_done(kcm);

	return 0;
}
static const struct proto_ops kcm_dgram_ops = {
	.family =	PF_KCM,
	.owner =	THIS_MODULE,
	.release =	kcm_release,
	.bind =		sock_no_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	sock_no_getname,
	.poll =		datagram_poll,
	.ioctl =	kcm_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	kcm_setsockopt,
	.getsockopt =	kcm_getsockopt,
	.sendmsg =	kcm_sendmsg,
	.recvmsg =	kcm_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	kcm_sendpage,
};
static const struct proto_ops kcm_seqpacket_ops = {
	.family =	PF_KCM,
	.owner =	THIS_MODULE,
	.release =	kcm_release,
	.bind =		sock_no_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	sock_no_getname,
	.poll =		datagram_poll,
	.ioctl =	kcm_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	kcm_setsockopt,
	.getsockopt =	kcm_getsockopt,
	.sendmsg =	kcm_sendmsg,
	.recvmsg =	kcm_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	kcm_sendpage,
	.splice_read =	kcm_splice_read,
};
/* Create proto operation for kcm sockets */
static int kcm_create(struct net *net, struct socket *sock,
		      int protocol, int kern)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);
	struct sock *sk;
	struct kcm_mux *mux;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &kcm_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &kcm_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (protocol != KCMPROTO_CONNECTED)
		return -EPROTONOSUPPORT;

	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Allocate a kcm mux, shared between KCM sockets */
	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
	if (!mux) {
		sk_free(sk);
		return -ENOMEM;
	}

	spin_lock_init(&mux->lock);
	spin_lock_init(&mux->rx_lock);
	INIT_LIST_HEAD(&mux->kcm_socks);
	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
	INIT_LIST_HEAD(&mux->kcm_tx_waiters);

	INIT_LIST_HEAD(&mux->psocks);
	INIT_LIST_HEAD(&mux->psocks_ready);
	INIT_LIST_HEAD(&mux->psocks_avail);

	mux->knet = knet;

	/* Add new MUX to list */
	mutex_lock(&knet->mutex);
	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
	knet->count++;
	mutex_unlock(&knet->mutex);

	skb_queue_head_init(&mux->rx_hold_queue);

	/* Init KCM socket */
	sock_init_data(sock, sk);
	init_kcm_sock(kcm_sk(sk), mux);

	return 0;
}
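
/* Creating a KCM socket from userspace (illustrative sketch):
 *
 *	int kcmfd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
 */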
static struct net_proto_family kcm_family_ops = {
	.family = PF_KCM,
	.create = kcm_create,
	.owner  = THIS_MODULE,
};
static __net_init int kcm_init_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	INIT_LIST_HEAD_RCU(&knet->mux_list);
	mutex_init(&knet->mutex);

	return 0;
}
static __net_exit void kcm_exit_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	/* All KCM sockets should be closed at this point, which should mean
	 * that all multiplexors and psocks have been destroyed.
	 */
	WARN_ON(!list_empty(&knet->mux_list));
}
static struct pernet_operations kcm_net_ops = {
	.init = kcm_init_net,
	.exit = kcm_exit_net,
	.id   = &kcm_net_id,
	.size = sizeof(struct kcm_net),
};
static int __init kcm_init(void)
{
	int err = -ENOMEM;

	kcm_muxp = kmem_cache_create("kcm_mux_cache",
				     sizeof(struct kcm_mux), 0,
				     SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	if (!kcm_muxp)
		goto fail;

	kcm_psockp = kmem_cache_create("kcm_psock_cache",
				       sizeof(struct kcm_psock), 0,
				       SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	if (!kcm_psockp)
		goto fail;

	kcm_wq = create_singlethread_workqueue("kkcmd");
	if (!kcm_wq)
		goto fail;

	err = proto_register(&kcm_proto, 1);
	if (err)
		goto fail;

	err = sock_register(&kcm_family_ops);
	if (err)
		goto sock_register_fail;

	err = register_pernet_device(&kcm_net_ops);
	if (err)
		goto net_ops_fail;

	err = kcm_proc_init();
	if (err)
		goto proc_init_fail;

	return 0;

proc_init_fail:
	unregister_pernet_device(&kcm_net_ops);

net_ops_fail:
	sock_unregister(PF_KCM);

sock_register_fail:
	proto_unregister(&kcm_proto);

fail:
	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);

	if (kcm_wq)
		destroy_workqueue(kcm_wq);

	return err;
}
static void __exit kcm_exit(void)
{
	kcm_proc_exit();
	unregister_pernet_device(&kcm_net_ops);
	sock_unregister(PF_KCM);
	proto_unregister(&kcm_proto);
	destroy_workqueue(kcm_wq);

	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);
}
module_init(kcm_init);
module_exit(kcm_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);