// SPDX-License-Identifier: GPL-2.0
/*
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/indirect_call_wrapper.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode,
				  int sync, void *key)
{
	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (READ_ONCE(queue->prev) != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
	goto out;
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);
static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (IS_ERR(skb)) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}
/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@queue: socket queue from which to receive
 *	@flags: MSG\_ flags
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with @err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*				--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = __skb_try_recv_from_queue(sk, queue, flags, off, &error,
						last);
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
		if (error)
			goto no_packet;
		if (skb)
			return skb;

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (READ_ONCE(queue->prev) != *last);

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, sk_queue, flags, off, err,
					      &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, sk_queue, err,
					      &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int off = 0;

	return __skb_recv_datagram(sk, &sk->sk_receive_queue,
				   flags | (noblock ? MSG_DONTWAIT : 0),
				   &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (!skb_unref(skb)) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);
/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG\_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
				      NULL);

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
							     size_t bytes,
							     void *data __always_unused,
							     struct iov_iter *i));
static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
			       struct iov_iter *to, int len, bool fault_short,
			       size_t (*cb)(const void *, size_t, void *,
					    struct iov_iter *), void *data)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
				    skb->data + offset, copy, data, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			u8 *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
					    vaddr + skb_frag_off(frag) + offset - start,
					    copy, data, to);
			kunmap(page);
			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (__skb_datagram_iter(frag_iter, offset - start,
						to, copy, fault_short, cb, data))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb. We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (fault_short || iov_iter_count(to))
		goto fault;

	return 0;
}
/**
 *	skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
 *	    and update a hash.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@hash: hash request to update
 */
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash)
{
	return __skb_datagram_iter(skb, offset, to, len, true,
				   hash_and_copy_to_iter, hash);
}
EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);
static size_t simple_copy_to_iter(const void *addr, size_t bytes,
				  void *data __always_unused,
				  struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}
/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	trace_skb_copy_datagram_iovec(skb, len);
	return __skb_datagram_iter(skb, offset, to, len, false,
				   simple_copy_to_iter, NULL);
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from,
				int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
						     skb_frag_off(frag) + offset - start,
						     copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *from, size_t length)
{
	int frag = skb_shinfo(skb)->nr_frags;

	while (length && iov_iter_count(from)) {
		struct page *pages[MAX_SKB_FRAGS];
		struct page *last_head = NULL;
		size_t start;
		ssize_t copied;
		unsigned long truesize;
		int refs, n = 0;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages(from, pages, length,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		iov_iter_advance(from, copied);
		length -= copied;

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		if (sk && sk->sk_type == SOCK_STREAM) {
			sk_wmem_queued_add(sk, truesize);
			sk_mem_charge(sk, truesize);
		} else {
			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
		}
		for (refs = 0; copied != 0; start = 0) {
			int size = min_t(int, copied, PAGE_SIZE - start);
			struct page *head = compound_head(pages[n]);

			start += (pages[n] - head) << PAGE_SHIFT;
			copied -= size;
			n++;
			if (frag) {
				skb_frag_t *last = &skb_shinfo(skb)->frags[frag - 1];

				if (head == skb_frag_page(last) &&
				    start == skb_frag_off(last) + skb_frag_size(last)) {
					skb_frag_size_add(last, size);
					/* We combined this page, we need to release
					 * a reference. Since compound pages refcount
					 * is shared among many pages, batch the refcount
					 * adjustments to limit false sharing.
					 */
					last_head = head;
					refs++;
					continue;
				}
			}
			if (refs) {
				page_ref_sub(last_head, refs);
				refs = 0;
			}
			skb_fill_page_desc(skb, frag++, head, start, size);
		}
		if (refs)
			page_ref_sub(last_head, refs);
	}
	return 0;
}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);
/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int copy = min_t(int, skb_headlen(skb), iov_iter_count(from));

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	return __zerocopy_sg_from_iter(NULL, skb, from, ~0U);
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
/**
 *	skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
 *	    and update a checksum.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@csump: checksum pointer
 */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	struct csum_state csdata = { .csum = *csump };
	int ret;

	ret = __skb_datagram_iter(skb, offset, to, len, true,
				  csum_and_copy_to_iter, &csdata);
	if (ret)
		return ret;

	*csump = csdata.csum;
	return 0;
}
/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;

		if (csum_fold(csum)) {
			iov_iter_revert(&msg->msg_iter, chunk);
			return -EINVAL;
		}

		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(NULL, skb);
	}
	return 0;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you *don't* use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);