/*
 *      SUCS NET3:
 *
 *      Generic datagram handling routines. These are generic for all
 *      protocols. Possibly a generic IP version on top of these would
 *      make sense. Not tonight however 8-).
 *      This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *      NetROM layer all have identical poll code and mostly
 *      identical recvmsg() code. So we share it here. The poll was
 *      shared before but buried in udp.c so I moved it.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *                      udp.c code)
 *
 *      Fixes:
 *              Alan Cox        :       NULL return from skb_peek_copy()
 *                                      understood
 *              Alan Cox        :       Rewrote skb_read_datagram to avoid the
 *                                      skb_peek_copy stuff.
 *              Alan Cox        :       Added support for SOCK_SEQPACKET.
 *                                      IPX can no longer use the SO_TYPE hack
 *                                      but AX.25 now works right, and SPX is
 *                                      feasible.
 *              Alan Cox        :       Fixed write poll of non IP protocol
 *                                      crash.
 *              Florian La Roche:       Changed for my new skbuff handling.
 *              Darryl Miles    :       Fixed non-blocking SOCK_SEQPACKET.
 *              Linus Torvalds  :       BSD semantic fixes.
 *              Alan Cox        :       Datagram iovec handling
 *              Darryl Miles    :       Fixed non-blocking SOCK_STREAM.
 *              Alan Cox        :       POSIXisms
 *              Pete Wyckoff    :       Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>

/*
 * Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
        return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
                                  void *key)
{
        unsigned long bits = (unsigned long)key;

        /*
         * Avoid a wakeup if event not interesting for us
         */
        if (bits && !(bits & (POLLIN | POLLERR)))
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
        int error;
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

        prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

        /* Socket errors? */
        error = sock_error(sk);
        if (error)
                goto out_err;

        if (!skb_queue_empty(&sk->sk_receive_queue))
                goto out;

        /* Socket shut down? */
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                goto out_noerr;

        /* Sequenced packets can come disconnected.
         * If so we report the problem
         */
        error = -ENOTCONN;
        if (connection_based(sk) &&
            !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
                goto out_err;

        /* handle signals */
        if (signal_pending(current))
                goto interrupted;

        error = 0;
        *timeo_p = schedule_timeout(*timeo_p);
out:
        finish_wait(sk_sleep(sk), &wait);
        return error;
interrupted:
        error = sock_intr_errno(*timeo_p);
out_err:
        *err = error;
        goto out;
out_noerr:
        *err = 0;
        error = 1;
        goto out;
}

/**
 * __skb_recv_datagram - Receive a datagram skbuff
 * @sk: socket
 * @flags: MSG_ flags
 * @peeked: returns non-zero if this packet has been seen before
 * @err: error code returned
 *
 * Get a datagram skbuff, understands the peeking, nonblocking wakeups
 * and possible races. This replaces identical code in packet, raw and
 * udp, as well as the IPX, AX.25 and AppleTalk layers. It also finally
 * fixes the long standing peek and read race for datagram sockets. If
 * you alter this routine remember it must be re-entrant.
 *
 * This function will lock the socket if a skb is returned, so the caller
 * needs to unlock the socket in that case (usually by calling
 * skb_free_datagram).
 *
 * * It does not lock socket since today. This function is
 * * free of race conditions. This measure should/can improve
 * * significantly datagram socket latencies at high loads,
 * * when data copying to user space takes lots of time.
 * * (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 * * 8) Great win.)
 * *                    --ANK (980729)
 *
 * The order of the tests when we find no data waiting are specified
 * quite explicitly by POSIX 1003.1g, don't change them without having
 * the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                    int *peeked, int *err)
{
        struct sk_buff *skb;
        long timeo;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
         */
        int error = sock_error(sk);

        if (error)
                goto no_packet;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        do {
                /* Again only user level code calls this function, so nothing
                 * interrupt level will suddenly eat the receive_queue.
                 *
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
                unsigned long cpu_flags;

                spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb) {
                        *peeked = skb->peeked;
                        if (flags & MSG_PEEK) {
                                skb->peeked = 1;
                                atomic_inc(&skb->users);
                        } else
                                __skb_unlink(skb, &sk->sk_receive_queue);
                }
                spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

                if (skb)
                        return skb;

                /* User doesn't want to wait */
                error = -EAGAIN;
                if (!timeo)
                        goto no_packet;

        } while (!wait_for_packet(sk, err, &timeo));

        return NULL;

no_packet:
        *err = error;
        return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
                                  int noblock, int *err)
{
        int peeked;

        return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
                                   &peeked, err);
}
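
/*
 * Illustrative sketch (not part of the original file): the typical calling
 * pattern for skb_recv_datagram() in a protocol's recvmsg() path -- receive,
 * copy to the user iovec, then always release the skb.  example_recvmsg()
 * is a hypothetical, simplified helper; only the skb_* calls are the real
 * API from this file.
 */
#if 0
static int example_recvmsg(struct sock *sk, struct msghdr *msg,
                           size_t len, int noblock, int flags)
{
        struct sk_buff *skb;
        int err, copied;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (copied > len) {
                copied = len;                   /* truncate oversized datagram */
                msg->msg_flags |= MSG_TRUNC;
        }

        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        skb_free_datagram(sk, skb);             /* always release the skb */

        return err ? err : copied;
}
#endif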

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
        consume_skb(skb);
        sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
        /* Fast path: if we hold the only reference, a read barrier is
         * enough to order against the atomic ops that published the skb;
         * otherwise drop our reference and let the last user free it.
         */
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;

        lock_sock_bh(sk);
        skb_orphan(skb);
        sk_mem_reclaim_partial(sk);
        unlock_sock_bh(sk);

        /* skb is now orphaned, can be freed outside of locked section */
        __kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 * skb_kill_datagram - Free a datagram skbuff forcibly
 * @sk: socket
 * @skb: datagram skbuff
 * @flags: MSG_ flags
 *
 * This function frees a datagram skbuff that was received by
 * skb_recv_datagram.  The flags argument must match the one
 * used for skb_recv_datagram.
 *
 * If the MSG_PEEK flag is set, and the packet is still on the
 * receive queue of the socket, it will be taken off the queue
 * before it is freed.
 *
 * This function currently only disables BH when acquiring the
 * sk_receive_queue lock.  Therefore it must not be used in a
 * context where that lock is acquired in an IRQ context.
 *
 * It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
        int err = 0;

        if (flags & MSG_PEEK) {
                err = -ENOENT;
                spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
        }

        kfree_skb(skb);
        atomic_inc(&sk->sk_drops);
        sk_mem_reclaim_partial(sk);

        return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
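
/*
 * Illustrative sketch (not part of the original file): how a datagram
 * protocol typically uses skb_kill_datagram() to discard a peeked datagram
 * whose checksum turns out to be bad, then lets the caller retry.  This is
 * a hypothetical, simplified reconstruction of the pattern used by e.g.
 * udp_recvmsg(), not the actual UDP code.
 */
#if 0
static int example_peek_and_verify(struct sock *sk, int flags)
{
        struct sk_buff *skb;
        int err;

        skb = skb_recv_datagram(sk, flags | MSG_PEEK, 0, &err);
        if (!skb)
                return err;

        if (__skb_checksum_complete(skb)) {
                /* Bad checksum: unlink it from the queue (if it is still
                 * there) and free it, then ask the caller to retry.
                 */
                skb_kill_datagram(sk, skb, flags | MSG_PEEK);
                return -EAGAIN;
        }

        /* Checksum good: the datagram can now be consumed for real;
         * here we just drop the reference taken by the peek.
         */
        skb_free_datagram(sk, skb);
        return 0;
}
#endif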

/**
 * skb_copy_datagram_iovec - Copy a datagram to an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying from
 * @to: io vector to copy to
 * @len: amount of data to copy from buffer to iovec
 *
 * Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                            struct iovec *to, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        trace_skb_copy_datagram_iovec(skb, len);

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovec(to, skb->data + offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovec(to, vaddr + frag->page_offset +
                                             offset - start, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_iovec(frag_iter,
                                                    offset - start,
                                                    to, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
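
/*
 * Illustrative sketch (not part of the original file): the offset argument
 * above is relative to skb->data, so callers commonly use it to skip a
 * transport header that sits at the front of the datagram.  Hypothetical
 * helper; struct udphdr is just an example of such a header.
 */
#if 0
static int example_copy_payload(struct sk_buff *skb, struct msghdr *msg,
                                int len)
{
        /* start copying after the 8-byte UDP header */
        return skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
                                       msg->msg_iov, len);
}
#endif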

/**
 * skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying from
 * @to: io vector to copy to
 * @to_offset: offset in the io vector to start copying to
 * @len: amount of data to copy from buffer to iovec
 *
 * Returns 0 or -EFAULT.
 * Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
                                  const struct iovec *to, int to_offset,
                                  int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovecend(to, vaddr + frag->page_offset +
                                                offset - start, to_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_const_iovec(frag_iter,
                                                          offset - start,
                                                          to, to_offset,
                                                          copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);

/**
 * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying to
 * @from: io vector to copy from
 * @from_offset: offset in the io vector to start copying from
 * @len: amount of data to copy to buffer from iovec
 *
 * Returns 0 or -EFAULT.
 * Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
                                 const struct iovec *from, int from_offset,
                                 int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
                                        copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                from_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_fromiovecend(vaddr + frag->page_offset +
                                                  offset - start,
                                                  from, from_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;

                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_from_iovec(frag_iter,
                                                         offset - start,
                                                         from,
                                                         from_offset,
                                                         copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
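
/*
 * Illustrative sketch (not part of the original file): filling a freshly
 * allocated skb from a user iovec on a transmit path, the way callers such
 * as tun use skb_copy_datagram_from_iovec().  The helper name and error
 * handling are hypothetical simplifications.
 */
#if 0
static struct sk_buff *example_build_skb(const struct iovec *iv,
                                         size_t len, gfp_t gfp)
{
        struct sk_buff *skb = alloc_skb(len, gfp);

        if (!skb)
                return NULL;

        skb_put(skb, len);      /* reserve len bytes of linear data */
        if (skb_copy_datagram_from_iovec(skb, 0, iv, 0, len)) {
                kfree_skb(skb); /* fault while copying from user */
                return NULL;
        }
        return skb;
}
#endif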

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
                                      __wsum *csump)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;
        int pos = 0;

        /* Copy header. */
        if (copy > 0) {
                int err = 0;
                if (copy > len)
                        copy = len;
                *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
                                               *csump, &err);
                if (err)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        int err = 0;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        csum2 = csum_and_copy_to_user(vaddr +
                                                      frag->page_offset +
                                                      offset - start,
                                                      to, copy, 0, &err);
                        kunmap(page);
                        if (err)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        __wsum csum2 = 0;
                        if (copy > len)
                                copy = len;
                        if (skb_copy_and_csum_datagram(frag_iter,
                                                       offset - start,
                                                       to, copy,
                                                       &csum2))
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
        __sum16 sum;

        sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
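
/*
 * Illustrative sketch (not part of the original file): validating a
 * received datagram's checksum before trusting its payload.
 * skb_csum_unnecessary() (defined in skbuff.h) short-circuits the check
 * when hardware has already verified it; the helper name here is
 * hypothetical.
 */
#if 0
static bool example_csum_ok(struct sk_buff *skb)
{
        if (skb_csum_unnecessary(skb))
                return true;            /* hardware already verified it */
        /* software fallback: fold the full checksum; zero means valid */
        return __skb_checksum_complete(skb) == 0;
}
#endif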

/**
 * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 * @skb: skbuff
 * @hlen: hardware length
 * @iov: io vector
 *
 * Caller _must_ check that the skb will fit into this iovec.
 *
 * Returns: 0       - success.
 *          -EINVAL - checksum failure.
 *          -EFAULT - fault during copy. Beware, in this case iovec
 *                    can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
                                     int hlen, struct iovec *iov)
{
        __wsum csum;
        int chunk = skb->len - hlen;

        if (!chunk)
                return 0;

        /* Skip filled elements.
         * Pretty silly, look at memcpy_toiovec, though 8)
         */
        while (!iov->iov_len)
                iov++;

        if (iov->iov_len < chunk) {
                if (__skb_checksum_complete(skb))
                        goto csum_error;
                if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
                        goto fault;
        } else {
                csum = csum_partial(skb->data, hlen, skb->csum);
                if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
                                               chunk, &csum))
                        goto fault;
                if (csum_fold(csum))
                        goto csum_error;
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                iov->iov_len -= chunk;
                iov->iov_base += chunk;
        }
        return 0;
csum_error:
        return -EINVAL;
fault:
        return -EFAULT;
}
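
/*
 * Illustrative sketch (not part of the original file): a recvmsg() path
 * that copies directly when the checksum is already known good, and
 * otherwise copies and verifies in one pass.  Note that the copy-and-csum
 * variant always copies the whole remaining datagram (skb->len - hlen).
 * Modelled loosely on udp_recvmsg(); the helper is hypothetical.
 */
#if 0
static int example_copy_checked(struct sk_buff *skb, struct msghdr *msg,
                                int hlen, int len)
{
        if (skb_csum_unnecessary(skb))
                return skb_copy_datagram_iovec(skb, hlen, msg->msg_iov, len);

        /* verify the checksum while copying; -EINVAL means a bad csum */
        return skb_copy_and_csum_datagram_iovec(skb, hlen, msg->msg_iov);
}
#endif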

/**
 * datagram_poll - generic datagram poll
 * @file: file struct
 * @sock: socket
 * @wait: poll table
 *
 * Datagram poll: Again totally generic. This also handles
 * sequenced packet sockets providing the socket receive queue
 * is only ever holding data ready to receive.
 *
 * Note: when you _don't_ use this routine for this protocol,
 * and you use a different write policy from sock_writeable()
 * then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->sk_state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_recv_datagram);