/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
				  void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}

/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*				--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;

		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = skb_peek(queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	int peeked;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
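
/*
 * A minimal usage sketch: how a datagram protocol's recvmsg() path would
 * typically obtain one queued skb and release it again. The helper name
 * example_dequeue_one and its surrounding context are hypothetical; only
 * skb_recv_datagram() and skb_free_datagram() are interfaces from this file.
 */
static inline int example_dequeue_one(struct sock *sk, int flags, int noblock)
{
	struct sk_buff *skb;
	int err;

	/* Block (unless noblock or MSG_DONTWAIT) until a datagram arrives. */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;	/* -EAGAIN, -EINTR, or a pending socket error */

	/* ... copy the datagram to the caller here ... */

	/* Release our reference and reclaim receive-buffer accounting. */
	skb_free_datagram(sk, skb);
	return 0;
}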

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;

	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	trace_kfree_skb(skb, skb_free_datagram_locked);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	atomic_inc(&sk->sk_drops);
	sk_mem_reclaim_partial(sk);

	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
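
/*
 * A minimal sketch of the intended calling pattern: when a receive path
 * discovers that a datagram obtained with MSG_PEEK is unusable (for example,
 * its checksum fails), skb_kill_datagram() both frees the skb and, in the
 * peek case, removes it from the receive queue so it is not seen again.
 * The helper name below and its return convention are hypothetical.
 */
static inline int example_drop_bad_datagram(struct sock *sk,
					    struct sk_buff *skb,
					    unsigned int flags)
{
	/* Must be called with the same flags passed to skb_recv_datagram(). */
	if (skb_kill_datagram(sk, skb, flags))
		return -ENOENT;	/* MSG_PEEK: skb already consumed elsewhere */
	return -EAGAIN;		/* datagram dropped; caller usually retries */
}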

/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_iovec);
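
/*
 * A minimal sketch of how recvmsg() implementations typically use the copy
 * helper above: copy at most the caller-supplied length starting past the
 * protocol header, and flag truncation when the datagram is larger. The
 * helper name and the hdrlen parameter are hypothetical.
 */
static inline int example_copy_to_user(struct sk_buff *skb, unsigned int hdrlen,
				       struct msghdr *msg, size_t len)
{
	int copied = skb->len - hdrlen;
	int err;

	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;	/* datagram larger than buffer */
	}

	/* The iovec inside msg is advanced as data is copied. */
	err = skb_copy_datagram_iovec(skb, hdrlen, msg->msg_iov, copied);
	if (err)
		return err;	/* -EFAULT */

	return copied;
}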

/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_const_iovec(frag_iter,
							  offset - start,
							  to, to_offset,
							  copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);

/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iovec(frag_iter,
							 offset - start,
							 from,
							 from_offset,
							 copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
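
/*
 * A minimal sketch of the transmit-side counterpart: allocate an skb large
 * enough for the user data and fill its linear area from the iovec carried
 * in a msghdr. The helper name is hypothetical; sock_alloc_send_skb(),
 * skb_put() and kfree_skb() are the usual kernel interfaces.
 */
static inline struct sk_buff *example_skb_from_iovec(struct sock *sk,
						      struct msghdr *msg,
						      size_t len, int noblock,
						      int *err)
{
	struct sk_buff *skb;

	skb = sock_alloc_send_skb(sk, len, noblock, err);
	if (!skb)
		return NULL;

	/* Make room in the linear area, then fill it from the user iovec. */
	skb_put(skb, len);
	if (skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len)) {
		kfree_skb(skb);
		*err = -EFAULT;
		return NULL;
	}
	return skb;
}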

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);

/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
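
/*
 * A minimal sketch of how a receive path typically chooses between the plain
 * copy and the copy-and-verify variant above: if the checksum has already
 * been validated (or is not needed), copy directly; otherwise verify it
 * during the copy and treat -EINVAL as a bad datagram. The helper name and
 * the hdrlen parameter are hypothetical.
 */
static inline int example_copy_and_verify(struct sk_buff *skb,
					  unsigned int hdrlen,
					  struct msghdr *msg, int copied)
{
	/* The caller must already have ensured the datagram fits the iovec
	 * when the checksumming variant is used (see the comment above).
	 */
	if (skb_csum_unnecessary(skb))
		return skb_copy_datagram_iovec(skb, hdrlen, msg->msg_iov,
					       copied);

	/* Verifies the checksum while copying; returns -EINVAL on mismatch. */
	return skb_copy_and_csum_datagram_iovec(skb, hdrlen, msg->msg_iov);
}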

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
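
/*
 * A minimal sketch of how datagram_poll() is normally consumed: a datagram
 * protocol points the .poll member of its struct proto_ops at it. The table
 * below is a hypothetical, deliberately partial example; a real proto_ops
 * table fills in every operation.
 */
static const struct proto_ops example_dgram_ops __maybe_unused = {
	.family	= PF_UNSPEC,
	.owner	= THIS_MODULE,
	.poll	= datagram_poll,	/* generic readable/writable reporting */
	/* a real table also fills in release, bind, sendmsg, recvmsg, ... */
};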