/*
 *      SUCS NET3:
 *
 *      Generic datagram handling routines. These are generic for all
 *      protocols. Possibly a generic IP version on top of these would
 *      make sense. Not tonight however 8-).
 *      This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *      NetROM layer all have identical poll code and mostly
 *      identical recvmsg() code. So we share it here. The poll was
 *      shared before but buried in udp.c so I moved it.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *                                                            udp.c code)
 *
 *      Fixes:
 *              Alan Cox        :       NULL return from skb_peek_copy()
 *                                      understood
 *              Alan Cox        :       Rewrote skb_read_datagram to avoid the
 *                                      skb_peek_copy stuff.
 *              Alan Cox        :       Added support for SOCK_SEQPACKET.
 *                                      IPX can no longer use the SO_TYPE hack
 *                                      but AX.25 now works right, and SPX is
 *                                      feasible.
 *              Alan Cox        :       Fixed write poll of non IP protocol
 *                                      crash.
 *              Florian La Roche:       Changed for my new skbuff handling.
 *              Darryl Miles    :       Fixed non-blocking SOCK_SEQPACKET.
 *              Linus Torvalds  :       BSD semantic fixes.
 *              Alan Cox        :       Datagram iovec handling
 *              Darryl Miles    :       Fixed non-blocking SOCK_STREAM.
 *              Alan Cox        :       POSIXisms
 *              Pete Wyckoff    :       Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

/*
 *      Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
        return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned int mode,
                                  int sync, void *key)
{
        unsigned long bits = (unsigned long)key;

        /*
         * Avoid a wakeup if event not interesting for us
         */
        if (bits && !(bits & (POLLIN | POLLERR)))
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
                                const struct sk_buff *skb)
{
        int error;
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

        prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

        /* Socket errors? */
        error = sock_error(sk);
        if (error)
                goto out_err;

        if (sk->sk_receive_queue.prev != skb)
                goto out;

        /* Socket shut down? */
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                goto out_noerr;

        /* Sequenced packets can come disconnected.
         * If so we report the problem
         */
        error = -ENOTCONN;
        if (connection_based(sk) &&
            !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
                goto out_err;

        /* handle signals */
        if (signal_pending(current))
                goto interrupted;

        error = 0;
        *timeo_p = schedule_timeout(*timeo_p);
out:
        finish_wait(sk_sleep(sk), &wait);
        return error;
interrupted:
        error = sock_intr_errno(*timeo_p);
out_err:
        *err = error;
        goto out;
out_noerr:
        *err = 0;
        error = 1;
        goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
        struct sk_buff *nskb;

        if (skb->peeked)
                return skb;

        /* We have to unshare an skb before modifying it. */
        if (!skb_shared(skb))
                goto done;

        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return ERR_PTR(-ENOMEM);

        skb->prev->next = nskb;
        skb->next->prev = nskb;
        nskb->prev = skb->prev;
        nskb->next = skb->next;

        consume_skb(skb);
        skb = nskb;

done:
        skb->peeked = 1;

        return skb;
}

/**
 *      __skb_try_recv_datagram - Receive a datagram skbuff
 *      @sk: socket
 *      @flags: MSG_ flags
 *      @destructor: invoked under the receive lock on successful dequeue
 *      @peeked: returns non-zero if this packet has been seen before
 *      @off: an offset in bytes to peek skb from. Returns an offset
 *            within an skb where data actually starts
 *      @err: error code returned
 *      @last: set to last peeked message to inform the wait function
 *             what to look for when peeking
 *
 *      Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *      and possible races. This replaces identical code in packet, raw and
 *      udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *      the long standing peek and read race for datagram sockets. If you
 *      alter this routine remember it must be re-entrant.
 *
 *      This function will lock the socket if a skb is returned, so
 *      the caller needs to unlock the socket in that case (usually by
 *      calling skb_free_datagram). Returns NULL with *err set to
 *      -EAGAIN if no data was available or to some other value if an
 *      error was detected.
 *
 *      * It does not lock socket since today. This function is
 *      * free of race conditions. This measure should/can improve
 *      * significantly datagram socket latencies at high loads,
 *      * when data copying to user space takes lots of time.
 *      * (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *      * 8) Great win.)
 *      *                                           --ANK (980729)
 *
 *      The order of the tests when we find no data waiting are specified
 *      quite explicitly by POSIX 1003.1g, don't change them without having
 *      the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                                        void (*destructor)(struct sock *sk,
                                                           struct sk_buff *skb),
                                        int *peeked, int *off, int *err,
                                        struct sk_buff **last)
{
        struct sk_buff_head *queue = &sk->sk_receive_queue;
        struct sk_buff *skb;
        unsigned long cpu_flags;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
         */
        int error = sock_error(sk);

        if (error)
                goto no_packet;

        *peeked = 0;
        do {
                /* Again only user level code calls this function, so nothing
                 * interrupt level will suddenly eat the receive_queue.
                 *
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
                int _off = *off;

                *last = (struct sk_buff *)queue;
                spin_lock_irqsave(&queue->lock, cpu_flags);
                skb_queue_walk(queue, skb) {
                        *last = skb;
                        if (flags & MSG_PEEK) {
                                if (_off >= skb->len && (skb->len || _off ||
                                                         skb->peeked)) {
                                        _off -= skb->len;
                                        continue;
                                }
                                if (!skb->len) {
                                        skb = skb_set_peeked(skb);
                                        if (IS_ERR(skb)) {
                                                error = PTR_ERR(skb);
                                                spin_unlock_irqrestore(&queue->lock,
                                                                       cpu_flags);
                                                goto no_packet;
                                        }
                                }
                                *peeked = 1;
                                atomic_inc(&skb->users);
                        } else {
                                __skb_unlink(skb, queue);
                                if (destructor)
                                        destructor(sk, skb);
                        }
                        spin_unlock_irqrestore(&queue->lock, cpu_flags);
                        *off = _off;
                        return skb;
                }

                spin_unlock_irqrestore(&queue->lock, cpu_flags);

                if (!sk_can_busy_loop(sk))
                        break;

                sk_busy_loop(sk, flags & MSG_DONTWAIT);
        } while (!skb_queue_empty(&sk->sk_receive_queue));

        error = -EAGAIN;

no_packet:
        *err = error;
        return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                    void (*destructor)(struct sock *sk,
                                                       struct sk_buff *skb),
                                    int *peeked, int *off, int *err)
{
        struct sk_buff *skb, *last;
        long timeo;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        do {
                skb = __skb_try_recv_datagram(sk, flags, destructor, peeked,
                                              off, err, &last);
                if (skb)
                        return skb;

                if (*err != -EAGAIN)
                        break;
        } while (timeo &&
                 !__skb_wait_for_more_packets(sk, err, &timeo, last));

        return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
                                  int noblock, int *err)
{
        int peeked, off = 0;

        return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
                                   NULL, &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
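
/*
 * Illustrative sketch (not part of the original file): how a datagram
 * protocol's recvmsg() typically drives this API -- dequeue with
 * skb_recv_datagram(), copy into the caller's iterator, then release the
 * skb. example_recvmsg() and its surrounding plumbing are hypothetical,
 * not a real in-tree function.
 *
 *      static int example_recvmsg(struct socket *sock, struct msghdr *msg,
 *                                 size_t len, int flags)
 *      {
 *              struct sock *sk = sock->sk;
 *              struct sk_buff *skb;
 *              int err;
 *
 *              skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
 *              if (!skb)
 *                      return err;
 *              if (len > skb->len)
 *                      len = skb->len;
 *              err = skb_copy_datagram_msg(skb, 0, msg, len);
 *              skb_free_datagram(sk, skb);
 *              return err ? err : len;
 *      }
 */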

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
        consume_skb(skb);
        sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
        bool slow;

        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users))) {
                sk_peek_offset_bwd(sk, len);
                return;
        }

        slow = lock_sock_fast(sk);
        sk_peek_offset_bwd(sk, len);
        skb_orphan(skb);
        sk_mem_reclaim_partial(sk);
        unlock_sock_fast(sk, slow);

        /* skb is now orphaned, can be freed outside of locked section */
        __kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
                        unsigned int flags,
                        void (*destructor)(struct sock *sk,
                                           struct sk_buff *skb))
{
        int err = 0;

        if (flags & MSG_PEEK) {
                err = -ENOENT;
                spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
                        if (destructor)
                                destructor(sk, skb);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
        }

        atomic_inc(&sk->sk_drops);
        return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *      skb_kill_datagram - Free a datagram skbuff forcibly
 *      @sk: socket
 *      @skb: datagram skbuff
 *      @flags: MSG_ flags
 *
 *      This function frees a datagram skbuff that was received by
 *      skb_recv_datagram. The flags argument must match the one
 *      used for skb_recv_datagram.
 *
 *      If the MSG_PEEK flag is set, and the packet is still on the
 *      receive queue of the socket, it will be taken off the queue
 *      before it is freed.
 *
 *      This function currently only disables BH when acquiring the
 *      sk_receive_queue lock. Therefore it must not be used in a
 *      context where that lock is acquired in an IRQ context.
 *
 *      It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
        int err = __sk_queue_drop_skb(sk, skb, flags, NULL);

        kfree_skb(skb);
        sk_mem_reclaim_partial(sk);
        return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
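
/*
 * Illustrative sketch (not from this file): a recvmsg() error path that
 * discovers a bad checksum after dequeuing usually drops the datagram
 * like this, passing the same flags it gave skb_recv_datagram(). A zero
 * return means the packet was removed here rather than by a concurrent
 * reader, so this is the place to account the drop:
 *
 *      csum_copy_err:
 *              if (!skb_kill_datagram(sk, skb, flags))
 *                      ...bump the protocol's drop statistics...
 *              return -EAGAIN;         (callers commonly retry)
 */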

/**
 *      skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *      @skb: buffer to copy
 *      @offset: offset in the buffer to start copying from
 *      @to: iovec iterator to copy to
 *      @len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
                           struct iov_iter *to, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset, start_off = offset, n;
        struct sk_buff *frag_iter;

        trace_skb_copy_datagram_iovec(skb, len);

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                n = copy_to_iter(skb->data + offset, copy, to);
                offset += n;
                if (n != copy)
                        goto short_copy;
                if ((len -= copy) == 0)
                        return 0;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        n = copy_page_to_iter(skb_frag_page(frag),
                                              frag->page_offset + offset -
                                              start, copy, to);
                        offset += n;
                        if (n != copy)
                                goto short_copy;
                        if (!(len -= copy))
                                return 0;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_iter(frag_iter, offset - start,
                                                   to, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

        /* This is not really a user copy fault, but rather someone
         * gave us a bogus length on the skb.  We should probably
         * print a warning here as it may indicate a kernel bug.
         */

fault:
        iov_iter_revert(to, offset - start_off);
        return -EFAULT;

short_copy:
        if (iov_iter_count(to))
                goto fault;

        return 0;
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
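
/*
 * Illustrative note (not from this file): receive paths normally reach
 * skb_copy_datagram_iter() through the skb_copy_datagram_msg() inline in
 * <linux/skbuff.h>, which simply passes the msghdr's iterator, e.g.
 *
 *      err = skb_copy_datagram_msg(skb, offset, msg, copied);
 *
 * which is equivalent to
 *
 *      err = skb_copy_datagram_iter(skb, offset, &msg->msg_iter, copied);
 */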

/**
 *      skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *      @skb: buffer to copy
 *      @offset: offset in the buffer to start copying to
 *      @from: the copy source
 *      @len: amount of data to copy to buffer from iovec
 *
 *      Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
                                struct iov_iter *from,
                                int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (copy_from_iter(skb->data + offset, copy, from) != copy)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        size_t copied;

                        if (copy > len)
                                copy = len;
                        copied = copy_page_from_iter(skb_frag_page(frag),
                                                     frag->page_offset + offset - start,
                                                     copy, from);
                        if (copied != copy)
                                goto fault;

                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_from_iter(frag_iter,
                                                        offset - start,
                                                        from, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
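
/*
 * Illustrative sketch (not from this file): a sendmsg() path that has
 * already allocated an skb sized for the message can fill its linear
 * area straight from the caller's iterator; error handling is trimmed
 * and the variable names are generic:
 *
 *      skb_put(skb, len);
 *      err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
 *      if (err) {
 *              kfree_skb(skb);
 *              return err;
 *      }
 */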

/**
 *      zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *      @skb: buffer to copy
 *      @from: the source to copy from
 *
 *      The function will first copy up to headlen, and then pin the userspace
 *      pages and build frags through them.
 *
 *      Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
        int len = iov_iter_count(from);
        int copy = min_t(int, skb_headlen(skb), len);
        int frag = 0;

        /* copy up to skb headlen */
        if (skb_copy_datagram_from_iter(skb, 0, from, copy))
                return -EFAULT;

        while (iov_iter_count(from)) {
                struct page *pages[MAX_SKB_FRAGS];
                size_t start;
                ssize_t copied;
                unsigned long truesize;
                int n = 0;

                if (frag == MAX_SKB_FRAGS)
                        return -EMSGSIZE;

                copied = iov_iter_get_pages(from, pages, ~0U,
                                            MAX_SKB_FRAGS - frag, &start);
                if (copied < 0)
                        return -EFAULT;

                iov_iter_advance(from, copied);

                truesize = PAGE_ALIGN(copied + start);
                skb->data_len += copied;
                skb->len += copied;
                skb->truesize += truesize;
                atomic_add(truesize, &skb->sk->sk_wmem_alloc);
                while (copied) {
                        int size = min_t(int, copied, PAGE_SIZE - start);
                        skb_fill_page_desc(skb, frag++, pages[n], start, size);
                        start = 0;
                        copied -= size;
                        n++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
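
/*
 * Illustrative sketch (not from this file): zerocopy transmit paths
 * (for example tap-like drivers fed by a userspace backend) typically
 * copy only the headers into the skb's linear area and let this helper
 * pin the remaining user pages as fragments; names here are generic:
 *
 *      skb = sock_alloc_send_pskb(sk, hdr_len, 0, noblock, &err, 0);
 *      ...
 *      err = zerocopy_sg_from_iter(skb, from);
 *      if (err == -EMSGSIZE)
 *              goto drop;      (message needs more than MAX_SKB_FRAGS pages)
 */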

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      struct iov_iter *to, int len,
                                      __wsum *csump)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset, start_off = offset;
        struct sk_buff *frag_iter;
        int pos = 0;
        int n;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
                offset += n;
                if (n != copy)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        __wsum csum2 = 0;
                        struct page *page = skb_frag_page(frag);
                        u8 *vaddr = kmap(page);

                        if (copy > len)
                                copy = len;
                        n = csum_and_copy_to_iter(vaddr + frag->page_offset +
                                                  offset - start, copy,
                                                  &csum2, to);
                        kunmap(page);
                        offset += n;
                        if (n != copy)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
                        pos += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        __wsum csum2 = 0;
                        if (copy > len)
                                copy = len;
                        if (skb_copy_and_csum_datagram(frag_iter,
                                                       offset - start,
                                                       to, copy,
                                                       &csum2))
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        pos += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        iov_iter_revert(to, offset - start_off);
        return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
        __sum16 sum;

        sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev);
        }
        if (!skb_shared(skb))
                skb->csum_valid = !sum;
        return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
        __wsum csum;
        __sum16 sum;

        csum = skb_checksum(skb, 0, skb->len, 0);

        /* skb->csum holds pseudo checksum */
        sum = csum_fold(csum_add(skb->csum, csum));
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev);
        }

        if (!skb_shared(skb)) {
                /* Save full packet checksum */
                skb->csum = csum;
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum_complete_sw = 1;
                skb->csum_valid = !sum;
        }

        return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);

/**
 *      skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *      @skb: skbuff
 *      @hlen: hardware length
 *      @msg: destination
 *
 *      Caller _must_ check that skb will fit to this iovec.
 *
 *      Returns: 0       - success.
 *               -EINVAL - checksum failure.
 *               -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
                                   int hlen, struct msghdr *msg)
{
        __wsum csum;
        int chunk = skb->len - hlen;

        if (!chunk)
                return 0;

        if (msg_data_left(msg) < chunk) {
                if (__skb_checksum_complete(skb))
                        return -EINVAL;
                if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
                        goto fault;
        } else {
                csum = csum_partial(skb->data, hlen, skb->csum);
                if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
                                               chunk, &csum))
                        goto fault;

                if (csum_fold(csum)) {
                        iov_iter_revert(&msg->msg_iter, chunk);
                        return -EINVAL;
                }

                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
        }
        return 0;
fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
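
/*
 * Illustrative sketch (not from this file): a UDP-style recvmsg() picks
 * between a plain copy and this checksum-verifying copy depending on
 * whether hardware already validated the packet; the labels and
 * variables below are generic:
 *
 *      if (skb_csum_unnecessary(skb))
 *              err = skb_copy_datagram_msg(skb, off, msg, copied);
 *      else {
 *              err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 *              if (err == -EINVAL)
 *                      goto csum_error;
 *      }
 */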

/**
 *      datagram_poll - generic datagram poll
 *      @file: file struct
 *      @sock: socket
 *      @wait: poll table
 *
 *      Datagram poll: Again totally generic. This also handles
 *      sequenced packet sockets providing the socket receive queue
 *      is only ever holding data ready to receive.
 *
 *      Note: when you _don't_ use this routine for this protocol,
 *      and you use a different write policy from sock_writeable()
 *      then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->sk_state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        return mask;
}
EXPORT_SYMBOL(datagram_poll);
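
/*
 * Illustrative sketch (not from this file): protocols that keep their
 * receive data on the generic socket queue usually wire datagram_poll()
 * straight into their proto_ops table; the ops structure and names
 * below are hypothetical:
 *
 *      static const struct proto_ops example_dgram_ops = {
 *              .family         = PF_EXAMPLE,
 *              .poll           = datagram_poll,
 *              .recvmsg        = example_recvmsg,
 *              ...
 *      };
 */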