/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/ll_poll.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
				  void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * Wait for the last received packet to be different from skb
 */
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				 const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (sk->sk_receive_queue.prev != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}

/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram).
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*			--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;
		int _off = *off;

		last = (struct sk_buff *)queue;
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			last = skb;
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				if (_off >= skb->len && (skb->len || _off ||
							 skb->peeked)) {
					_off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			*off = _off;
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		if (sk_can_busy_loop(sk) &&
		    sk_busy_loop(sk, flags & MSG_DONTWAIT))
			continue;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_more_packets(sk, err, &timeo, last));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
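
/*
 * Usage sketch (illustrative, not compiled into this file): driving the
 * peek-with-offset interface directly. The surrounding variables are
 * hypothetical; compare the call site in udp_recvmsg().
 */
#if 0
	int peeked, off = 0, err;
	struct sk_buff *skb;

	/* With MSG_PEEK the skb stays queued and its reference count is
	 * bumped; *off reports where the data starts in the returned skb.
	 */
	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				  &peeked, &off, &err);
	if (!skb)
		return err;
#endif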

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
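
/*
 * Usage sketch (illustrative, not compiled into this file): a minimal
 * recvmsg() in the style of raw/udp, showing the intended pairing of
 * skb_recv_datagram() with skb_copy_datagram_iovec() and
 * skb_free_datagram(). "example_recvmsg" is a hypothetical name.
 */
#if 0
static int example_recvmsg(struct kiocb *iocb, struct sock *sk,
			   struct msghdr *msg, size_t len,
			   int noblock, int flags, int *addr_len)
{
	struct sk_buff *skb;
	int copied, err;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (copied > len) {
		copied = len;			/* truncate to user buffer */
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	skb_free_datagram(sk, skb);
	return err ? err : copied;
}
#endif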

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;

	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	atomic_inc(&sk->sk_drops);
	sk_mem_reclaim_partial(sk);

	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
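
/*
 * Usage sketch (illustrative, not compiled into this file): discarding a
 * datagram whose checksum failed during copy, as the csum_copy_err path
 * in udp_recvmsg() does. Labels and variables are from that context.
 */
#if 0
csum_copy_err:
	/* A peeked skb is unlinked from the receive queue before being
	 * freed; err == 0 means the packet was removed by us.
	 */
	err = skb_kill_datagram(sk, skb, flags);
	/* retry rather than surface a transient checksum error */
	goto try_again;
#endif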

/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_iovec);

/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_const_iovec(frag_iter,
							  offset - start,
							  to, to_offset,
							  copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
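
/*
 * Usage sketch (illustrative, not compiled into this file): copying an
 * skb into a caller-supplied iovec at a fixed output offset without
 * consuming the iovec, roughly as tun_put_user() does. "iv" and
 * "hdr_len" are hypothetical.
 */
#if 0
	/* leave room at the front of the iovec for a header that the
	 * caller fills in separately; the iovec itself is not advanced
	 */
	if (skb_copy_datagram_const_iovec(skb, 0, iv, hdr_len, skb->len))
		return -EFAULT;
#endif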

/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iovec(frag_iter,
							 offset - start,
							 from,
							 from_offset,
							 copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
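
/*
 * Usage sketch (illustrative, not compiled into this file): filling a
 * freshly allocated skb from a user iovec on the send path, tun/packet
 * style. The sizes and variables are hypothetical.
 */
#if 0
	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		return err;
	skb_put(skb, len);			/* make room for the payload */
	if (skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len)) {
		kfree_skb(skb);
		return -EFAULT;
	}
#endif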

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
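
/*
 * Usage sketch (illustrative, not compiled into this file): the common
 * validation pattern, compare udp_lib_checksum_complete(). Packets that
 * hardware already verified skip the software fold.
 */
#if 0
	if (!skb_csum_unnecessary(skb) && __skb_checksum_complete(skb))
		goto csum_error;		/* checksum is definitely bad */
#endif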

/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: header length (these bytes are not copied, but are folded
 *	       into the checksum together with skb->csum)
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
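
/*
 * Usage sketch (illustrative, not compiled into this file): choosing
 * between the plain and the copy-and-checksum variants on receive, as
 * udp_recvmsg() does. "copied" and the UDP header offset come from that
 * context.
 */
#if 0
	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb,
						       sizeof(struct udphdr),
						       msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
#endif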

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable(),
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
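
/*
 * Usage sketch (illustrative, not compiled into this file): protocols
 * plug datagram_poll() straight into their proto_ops, as IPX and
 * AppleTalk do. PF_EXAMPLE and the struct name are hypothetical.
 */
#if 0
static const struct proto_ops example_dgram_ops = {
	.family		= PF_EXAMPLE,
	.poll		= datagram_poll,
	/* remaining ops elided */
};
#endif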