/*
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
static int receiver_wake_function(wait_queue_t *wait, unsigned int mode,
				  int sync, void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (sk->sk_receive_queue.prev != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);
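
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * alternates a non-blocking try-receive with this wait, passing back the
 * last skb it saw so that a wakeup only matters once the queue tail has
 * changed. This is exactly the loop __skb_recv_datagram() runs below.
 */
#if 0
	do {
		skb = __skb_try_recv_datagram(sk, flags, NULL, &peeked,
					      &off, &err, &last);
		if (skb)
			return skb;
	} while (timeo && !__skb_wait_for_more_packets(sk, &err, &timeo,
						       last));
#endif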
static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}
/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@destructor: invoked under the receive lock on successful dequeue
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk variants. It also
 *	finally fixes the long standing peek and read race for datagram
 *	sockets. If you alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with *err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *peeked, int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	*peeked = 0;
	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		int _off = *off;

		*last = (struct sk_buff *)queue;
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			*last = skb;
			if (flags & MSG_PEEK) {
				if (_off >= skb->len && (skb->len || _off ||
							 skb->peeked)) {
					_off -= skb->len;
					continue;
				}

				skb = skb_set_peeked(skb);
				if (IS_ERR(skb)) {
					error = PTR_ERR(skb);
					spin_unlock_irqrestore(&queue->lock,
							       cpu_flags);
					goto no_packet;
				}

				*peeked = 1;
				atomic_inc(&skb->users);
			} else {
				__skb_unlink(skb, queue);
				if (destructor)
					destructor(sk, skb);
			}
			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			*off = _off;
			return skb;
		}

		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (!skb_queue_empty(&sk->sk_receive_queue));

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);
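
/*
 * Usage sketch (illustrative, not part of the original file): peeking at
 * queued data with a persistent offset, in the style of SO_PEEK_OFF users
 * such as af_unix. On success the skb stays on the queue with an extra
 * reference held, and @off is rewritten to the data offset inside it.
 */
#if 0
	int peeked, err;
	int off = sk_peek_offset(sk, MSG_PEEK);
	struct sk_buff *last;
	struct sk_buff *skb;

	skb = __skb_try_recv_datagram(sk, MSG_PEEK, NULL, &peeked,
				      &off, &err, &last);
#endif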
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, flags, destructor, peeked,
					      off, err, &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, err, &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   NULL, &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
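
/*
 * Usage sketch (illustrative, not part of the original file): the shape of
 * a minimal datagram recvmsg() built on these helpers, following the
 * pattern shared by UDP, AX.25 and friends. The function name is an
 * assumption for the sketch.
 */
#if 0
static int example_dgram_recvmsg(struct sock *sk, struct msghdr *msg,
				 size_t size, int noblock)
{
	struct sk_buff *skb;
	int copied, err;

	skb = skb_recv_datagram(sk, msg->msg_flags, noblock, &err);
	if (!skb)
		return err;

	copied = min_t(int, skb->len, size);
	if (copied < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	skb_free_datagram(sk, skb);
	return err ? err : copied;
}
#endif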
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users))) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);
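
/*
 * Note (illustrative): the usual entry point is the
 * skb_free_datagram_locked() inline from <linux/skbuff.h>, which passes
 * len == 0 so no peek offset needs rewinding:
 */
#if 0
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
#endif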
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
			unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);
/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, skb, flags, NULL);

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
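
/*
 * Usage sketch (illustrative, not part of the original file): dropping a
 * datagram that was peeked but turned out to be bad, as UDP does on a
 * checksum failure. Passing the same flags guarantees a MSG_PEEK'd skb is
 * unlinked from the receive queue before it is freed.
 */
#if 0
	if (skb_checksum_complete(skb)) {
		skb_kill_datagram(sk, skb, flags);
		return -EAGAIN;	/* caller typically retries */
	}
#endif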
/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = copy_to_iter(skb->data + offset, copy, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			n = copy_page_to_iter(skb_frag_page(frag),
					      frag->page_offset + offset -
					      start, copy, to);
			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iter(frag_iter, offset - start,
						   to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb. We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (iov_iter_count(to))
		goto fault;

	return 0;
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
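
/*
 * Note (illustrative): most protocols reach this through the
 * skb_copy_datagram_msg() inline from <linux/skbuff.h>, which just
 * unwraps the iterator embedded in the msghdr:
 */
#if 0
static inline int skb_copy_datagram_msg(const struct sk_buff *from,
					int offset, struct msghdr *msg,
					int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
#endif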
/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from,
				int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
					  frag->page_offset + offset - start,
					  copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
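
/*
 * Note (illustrative): the sending side has a matching wrapper,
 * skb_copy_datagram_from_msg() in <linux/skbuff.h>, feeding the msghdr's
 * iterator into this routine:
 */
#if 0
static inline int skb_copy_datagram_from_msg(struct sk_buff *skb,
					     int offset, struct msghdr *msg,
					     int size)
{
	return skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, size);
}
#endif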
/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int len = iov_iter_count(from);
	int copy = min_t(int, skb_headlen(skb), len);
	int frag = 0;

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	while (iov_iter_count(from)) {
		struct page *pages[MAX_SKB_FRAGS];
		size_t start;
		ssize_t copied;
		unsigned long truesize;
		int n = 0;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages(from, pages, ~0U,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		iov_iter_advance(from, copied);

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (copied) {
			int size = min_t(int, copied, PAGE_SIZE - start);
			skb_fill_page_desc(skb, frag++, pages[n], start, size);
			start = 0;
			copied -= size;
			n++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
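
/*
 * Usage sketch (illustrative, not part of the original file): a
 * tun/macvtap-style transmit path reserving the linear part for headers
 * and mapping the remaining user pages as frags. hdr_len and the error
 * handling are assumptions for the sketch; the skb must be owned by a
 * socket, since the pinned pages are charged to sk_wmem_alloc.
 */
#if 0
	skb = sock_alloc_send_skb(sk, hdr_len, noblock, &err);
	if (!skb)
		return err;
	skb_put(skb, hdr_len);			/* linear part to fill first */
	err = zerocopy_sg_from_iter(skb, from);	/* 0, -EFAULT or -EMSGSIZE */
	if (err) {
		kfree_skb(skb);
		return err;
	}
#endif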
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset;
	struct sk_buff *frag_iter;
	int pos = 0;
	int n;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
		offset += n;
		if (n != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			struct page *page = skb_frag_page(frag);
			u8  *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = csum_and_copy_to_iter(vaddr + frag->page_offset +
						  offset - start, copy,
						  &csum2, to);
			kunmap(page);
			offset += n;
			if (n != copy)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;

			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;
}
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}
	if (!skb_shared(skb))
		skb->csum_valid = !sum;
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);
__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	__wsum csum;
	__sum16 sum;

	csum = skb_checksum(skb, 0, skb->len, 0);

	/* skb->csum holds pseudo checksum */
	sum = csum_fold(csum_add(skb->csum, csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	if (!skb_shared(skb)) {
		/* Save full packet checksum */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_complete_sw = 1;
		skb->csum_valid = !sum;
	}

	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);
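
/*
 * Note (illustrative): callers normally go through the
 * skb_checksum_complete() inline from <linux/skbuff.h>, which skips the
 * software fold entirely when the checksum is already known good:
 */
#if 0
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
#endif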
/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
	}
	return 0;
csum_error:
	iov_iter_revert(&msg->msg_iter, chunk);
	return -EINVAL;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
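
/*
 * Usage sketch (illustrative, not part of the original file): the
 * UDP-style receive decision, verifying the checksum while copying when
 * it has not been validated yet. The sizeof(struct udphdr) skip is an
 * assumption for the sketch.
 */
#if 0
	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
					    msg, copied);
	else
		err = skb_copy_and_csum_datagram_msg(skb,
						     sizeof(struct udphdr),
						     msg);
#endif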
/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
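
/*
 * Usage sketch (illustrative, not part of the original file): datagram
 * protocols typically wire this routine straight into their proto_ops,
 * as inet_dgram_ops does for UDP. Members not shown are elided for the
 * sketch.
 */
#if 0
static const struct proto_ops example_dgram_ops = {
	.family	= PF_INET,
	.owner	= THIS_MODULE,
	.poll	= datagram_poll,	/* the generic datagram poll above */
};
#endif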