/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
				  void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (sk->sk_receive_queue.prev != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							     struct sk_buff *skb),
					  int *peeked, int *off, int *err,
					  struct sk_buff **last)
{
	struct sk_buff *skb;
	int _off = *off;

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (_off >= skb->len && (skb->len || _off ||
						 skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (unlikely(IS_ERR(skb))) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			*peeked = 1;
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
			if (destructor)
				destructor(sk, skb);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}

/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG\_ flags
 *	@destructor: invoked under the receive lock on successful dequeue
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with @err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*					--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *peeked, int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	*peeked = 0;
	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
						peeked, off, &error, last);
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
		if (error)
			goto no_packet;
		if (skb)
			return skb;

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (!skb_queue_empty(&sk->sk_receive_queue));

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, flags, destructor, peeked,
					      off, err, &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, err, &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   NULL, &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
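
/*
 * Illustrative sketch (not part of this file): a minimal datagram
 * recvmsg() built on the helpers above. The names my_proto_recvmsg and
 * the reduced error handling are assumptions of this sketch, not code
 * from any particular protocol.
 *
 *	static int my_proto_recvmsg(struct socket *sock, struct msghdr *msg,
 *				    size_t len, int flags)
 *	{
 *		struct sock *sk = sock->sk;
 *		struct sk_buff *skb;
 *		int err, copied;
 *
 *		skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
 *		if (!skb)
 *			return err;	// -EAGAIN, -EINTR, pending sk_err, ...
 *
 *		copied = skb->len;
 *		if (copied > len) {
 *			copied = len;
 *			msg->msg_flags |= MSG_TRUNC;
 *		}
 *		err = skb_copy_datagram_msg(skb, 0, msg, copied);
 *
 *		skb_free_datagram(sk, skb);
 *		return err ? err : copied;
 *	}
 */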

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (!skb_unref(skb)) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb == skb_peek(sk_queue)) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG\_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
				      NULL);

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);

/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = copy_to_iter(skb->data + offset, copy, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			n = copy_page_to_iter(skb_frag_page(frag),
					      frag->page_offset + offset -
					      start, copy, to);
			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iter(frag_iter, offset - start,
						   to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb.  We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (iov_iter_count(to))
		goto fault;

	return 0;
}
EXPORT_SYMBOL(skb_copy_datagram_iter);

/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from,
				int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
					  frag->page_offset + offset - start,
					  copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);

/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int len = iov_iter_count(from);
	int copy = min_t(int, skb_headlen(skb), len);
	int frag = 0;

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	while (iov_iter_count(from)) {
		struct page *pages[MAX_SKB_FRAGS];
		size_t start;
		ssize_t copied;
		unsigned long truesize;
		int n = 0;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages(from, pages, ~0U,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		iov_iter_advance(from, copied);

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
		while (copied) {
			int size = min_t(int, copied, PAGE_SIZE - start);
			skb_fill_page_desc(skb, frag++, pages[n], start, size);
			start = 0;
			copied -= size;
			n++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
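
/*
 * Illustrative sketch (not part of this file): a transmit path that pins
 * user pages instead of copying the full payload. It assumes the skb was
 * allocated with enough linear space for the protocol header (for example
 * via sock_alloc_send_pskb()) and is already owned by a socket, since the
 * pinned pages' truesize is charged to skb->sk->sk_wmem_alloc above. The
 * 'zerocopy', 'from' and 'len' variables are assumptions of this sketch.
 *
 *	if (zerocopy)
 *		err = zerocopy_sg_from_iter(skb, from);
 *	else
 *		err = skb_copy_datagram_from_iter(skb, 0, from, len);
 *	if (err)
 *		goto drop;
 */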

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset;
	struct sk_buff *frag_iter;
	int pos = 0;
	int n;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
		offset += n;
		if (n != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			struct page *page = skb_frag_page(frag);
			u8  *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = csum_and_copy_to_iter(vaddr + frag->page_offset +
						  offset - start, copy,
						  &csum2, to);
			kunmap(page);
			offset += n;
			if (n != copy)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}
	if (!skb_shared(skb))
		skb->csum_valid = !sum;
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	__wsum csum;
	__sum16 sum;

	csum = skb_checksum(skb, 0, skb->len, 0);

	/* skb->csum holds pseudo checksum */
	sum = csum_fold(csum_add(skb->csum, csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	if (!skb_shared(skb)) {
		/* Save full packet checksum */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_complete_sw = 1;
		skb->csum_valid = !sum;
	}

	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);

/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;

		if (csum_fold(csum)) {
			iov_iter_revert(&msg->msg_iter, chunk);
			return -EINVAL;
		}

		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
	}
	return 0;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
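
/*
 * Illustrative sketch (not part of this file): how a UDP-style recvmsg()
 * might choose between the plain copy and the copy-and-checksum variant
 * above. 'off' (offset of the payload within the skb), 'copied' (bytes the
 * caller asked for) and 'full_len' (full payload length) are assumptions
 * of this sketch.
 *
 *	if (copied < full_len) {
 *		// partial read: verify the checksum up front, since
 *		// copy-and-checksum only works over the whole payload
 *		if (__skb_checksum_complete(skb))
 *			goto csum_error;
 *	}
 *
 *	if (skb_csum_unnecessary(skb)) {
 *		err = skb_copy_datagram_msg(skb, off, msg, copied);
 *	} else {
 *		// fold checksum verification into the copy to user space
 *		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 *		if (err == -EINVAL)
 *			goto csum_error;
 *	}
 */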

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you *don't* use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
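
/*
 * Illustrative sketch (not part of this file): datagram protocols typically
 * wire datagram_poll() straight into their proto_ops next to their own
 * recvmsg/sendmsg handlers. PF_EXAMPLE and the my_proto_* names are
 * hypothetical; the remaining proto_ops fields are omitted here.
 *
 *	static const struct proto_ops my_proto_dgram_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.poll		= datagram_poll,
 *		.recvmsg	= my_proto_recvmsg,
 *		.sendmsg	= my_proto_sendmsg,
 *		// release, bind, connect, ... omitted
 *	};
 */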