/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>.
 *			(datagram_poll() from old udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned int mode,
				  int sync, void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (sk->sk_receive_queue.prev != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@destructor: invoked under the receive lock on successful dequeue
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with *err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*			--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *peeked, int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	*peeked = 0;
	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		int _off = *off;

		*last = (struct sk_buff *)queue;
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			*last = skb;
			if (flags & MSG_PEEK) {
				if (_off >= skb->len && (skb->len || _off ||
							 skb->peeked)) {
					_off -= skb->len;
					continue;
				}
				if (!skb->len) {
					skb = skb_set_peeked(skb);
					if (IS_ERR(skb)) {
						error = PTR_ERR(skb);
						spin_unlock_irqrestore(&queue->lock,
								       cpu_flags);
						goto no_packet;
					}
				}
				*peeked = 1;
				atomic_inc(&skb->users);
			} else {
				__skb_unlink(skb, queue);
				if (destructor)
					destructor(sk, skb);
			}
			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			*off = _off;
			return skb;
		}

		spin_unlock_irqrestore(&queue->lock, cpu_flags);
	} while (sk_can_busy_loop(sk) &&
		 sk_busy_loop(sk, flags & MSG_DONTWAIT));

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, flags, destructor, peeked,
					      off, err, &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, err, &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   NULL, &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
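
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a minimal datagram recvmsg() built on skb_recv_datagram(). The name
 * example_recvmsg is hypothetical; the pattern follows the udp/packet
 * receive paths that this file was written to serve.
 *
 *	static int example_recvmsg(struct socket *sock, struct msghdr *msg,
 *				   size_t len, int flags)
 *	{
 *		struct sock *sk = sock->sk;
 *		struct sk_buff *skb;
 *		int err, copied;
 *
 *		skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
 *		if (!skb)
 *			return err;
 *
 *		copied = skb->len;
 *		if (copied > len) {
 *			copied = len;
 *			msg->msg_flags |= MSG_TRUNC;
 *		}
 *		err = skb_copy_datagram_msg(skb, 0, msg, copied);
 *		skb_free_datagram(sk, skb);
 *		return err ? err : copied;
 *	}
 */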

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users))) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
			unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, skb, flags);

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
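
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * an error path in a protocol's recvmsg() discards the datagram with the
 * same flags used to receive it, so that an skb obtained with MSG_PEEK is
 * also unlinked from the receive queue before being freed.
 *
 *	if (skb_copy_datagram_msg(skb, 0, msg, copied)) {
 *		skb_kill_datagram(sk, skb, flags);
 *		return -EFAULT;
 *	}
 */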

/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_to_iter(skb->data + offset, copy, to) != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (copy_page_to_iter(skb_frag_page(frag),
					      frag->page_offset + offset -
					      start, copy, to) != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iter(frag_iter, offset - start,
						   to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb. We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	return -EFAULT;

short_copy:
	if (iov_iter_count(to))
		goto fault;

	return 0;
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
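
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * recvmsg() implementations usually reach this routine through the
 * skb_copy_datagram_msg() wrapper in <linux/skbuff.h>, which passes the
 * msghdr's iterator:
 *
 *	err = skb_copy_datagram_iter(skb, 0, &msg->msg_iter, copied);
 */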

/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from,
				int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
					frag->page_offset + offset - start,
					copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
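
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a sendmsg() path filling a freshly allocated linear skb from the
 * caller's iovec. Note the skb_put() so that skb_headlen() covers the
 * bytes being copied into.
 *
 *	skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT,
 *				  &err);
 *	if (!skb)
 *		return err;
 *	skb_put(skb, len);
 *	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */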

/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int len = iov_iter_count(from);
	int copy = min_t(int, skb_headlen(skb), len);
	int frag = 0;

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	while (iov_iter_count(from)) {
		struct page *pages[MAX_SKB_FRAGS];
		size_t start;
		ssize_t copied;
		unsigned long truesize;
		int n = 0;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages(from, pages, ~0U,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		iov_iter_advance(from, copied);

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (copied) {
			int size = min_t(int, copied, PAGE_SIZE - start);

			skb_fill_page_desc(skb, frag++, pages[n], start, size);
			start = 0;
			copied -= size;
			n++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
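
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * tun/macvtap-style transmit paths use this to pin user pages rather
 * than copy the payload. The skb must already be owned by a socket
 * (skb->sk set), since the pinned pages' truesize is charged to
 * sk_wmem_alloc above.
 *
 *	err = zerocopy_sg_from_iter(skb, &msg->msg_iter);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */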

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;
	int n;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
		if (n != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			struct page *page = skb_frag_page(frag);
			u8 *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = csum_and_copy_to_iter(vaddr + frag->page_offset +
						  offset - start, copy,
						  &csum2, to);
			kunmap(page);
			if (n != copy)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;

			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}
	if (!skb_shared(skb))
		skb->csum_valid = !sum;
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	__wsum csum;
	__sum16 sum;

	csum = skb_checksum(skb, 0, skb->len, 0);

	/* skb->csum holds pseudo checksum */
	sum = csum_fold(csum_add(skb->csum, csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	if (!skb_shared(skb)) {
		/* Save full packet checksum */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_complete_sw = 1;
		skb->csum_valid = !sum;
	}

	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);

/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
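
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a UDP-style recvmsg() verifies the checksum while copying when the
 * checksum has not yet been validated, and falls back to a plain copy
 * when it is already known to be good.
 *
 *	if (skb_csum_unnecessary(skb))
 *		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
 *					    msg, copied);
 *	else
 *		err = skb_copy_and_csum_datagram_msg(skb,
 *						     sizeof(struct udphdr),
 *						     msg);
 *	if (err == -EINVAL)	// checksum failure
 *		goto csum_copy_err;
 */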

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
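
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * datagram protocols typically plug this routine straight into their
 * proto_ops; PF_EXAMPLE and example_dgram_ops are hypothetical names.
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family	= PF_EXAMPLE,
 *		.poll	= datagram_poll,
 *		...
 *	};
 */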