/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *			udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */
35 | ||
36 | #include <linux/module.h> | |
37 | #include <linux/types.h> | |
38 | #include <linux/kernel.h> | |
39 | #include <asm/uaccess.h> | |
1da177e4 LT |
40 | #include <linux/mm.h> |
41 | #include <linux/interrupt.h> | |
42 | #include <linux/errno.h> | |
43 | #include <linux/sched.h> | |
44 | #include <linux/inet.h> | |
1da177e4 LT |
45 | #include <linux/netdevice.h> |
46 | #include <linux/rtnetlink.h> | |
47 | #include <linux/poll.h> | |
48 | #include <linux/highmem.h> | |
3305b80c | 49 | #include <linux/spinlock.h> |
5a0e3ad6 | 50 | #include <linux/slab.h> |
0433547a | 51 | #include <linux/pagemap.h> |
1da177e4 LT |
52 | |
53 | #include <net/protocol.h> | |
54 | #include <linux/skbuff.h> | |
1da177e4 | 55 | |
c752f073 ACM |
56 | #include <net/checksum.h> |
57 | #include <net/sock.h> | |
58 | #include <net/tcp_states.h> | |
e9b3cc1b | 59 | #include <trace/events/skb.h> |
076bb0c8 | 60 | #include <net/busy_poll.h> |
1da177e4 LT |
61 | |
/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
				  void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * Wait for the last received packet to be different from skb
 */
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				 const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (sk->sk_receive_queue.prev != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem.
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}

/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as in IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram).
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*				--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change the tests without
 *	having the standard around, please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;
		int _off = *off;

		last = (struct sk_buff *)queue;
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			last = skb;
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				if (_off >= skb->len && (skb->len || _off ||
							 skb->peeked)) {
					_off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			*off = _off;
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		if (sk_can_busy_loop(sk) &&
		    sk_busy_loop(sk, flags & MSG_DONTWAIT))
			continue;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_more_packets(sk, err, &timeo, last));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;

	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram.  The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock.  Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	atomic_inc(&sk->sk_drops);
	sk_mem_reclaim_partial(sk);

	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);

/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_iovec);

/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_const_iovec(frag_iter,
							  offset - start,
							  to, to_offset,
							  copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);

/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iovec(frag_iter,
							 offset - start,
							 from,
							 from_offset,
							 copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);

/**
 *	zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
 *	@skb: buffer to copy
 *	@from: io vector to copy from
 *	@offset: offset in the io vector to start copying from
 *	@count: amount of vectors to copy to buffer from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 *	Note: the iovec is not modified during the copy.
 */
int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
			   int offset, size_t count)
{
	int len = iov_length(from, count) - offset;
	int copy = min_t(int, skb_headlen(skb), len);
	int size;
	int i = 0;

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy))
		return -EFAULT;

	if (len == copy)
		return 0;

	offset += copy;
	while (count--) {
		struct page *page[MAX_SKB_FRAGS];
		int num_pages;
		unsigned long base;
		unsigned long truesize;

		/* Skip over from offset and copied */
		if (offset >= from->iov_len) {
			offset -= from->iov_len;
			++from;
			continue;
		}
		len = from->iov_len - offset;
		base = (unsigned long)from->iov_base + offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (i + size > MAX_SKB_FRAGS)
			return -EMSGSIZE;
		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
		if (num_pages != size) {
			release_pages(&page[i], num_pages, 0);
			return -EFAULT;
		}
		truesize = size * PAGE_SIZE;
		skb->data_len += len;
		skb->len += len;
		skb->truesize += truesize;
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (len) {
			int off = base & ~PAGE_MASK;
			int size = min_t(int, len, PAGE_SIZE - off);
			skb_fill_page_desc(skb, i, page[i], off, size);
			base += size;
			len -= size;
			i++;
		}
		offset = 0;
		++from;
	}
	return 0;
}
EXPORT_SYMBOL(zerocopy_sg_from_iovec);

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}
	skb->csum_valid = !sum;
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	__wsum csum;
	__sum16 sum;

	csum = skb_checksum(skb, 0, skb->len, 0);

	/* skb->csum holds pseudo checksum */
	sum = csum_fold(csum_add(skb->csum, csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	/* Save full packet checksum */
	skb->csum = csum;
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum_complete_sw = 1;
	skb->csum_valid = !sum;

	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);

/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);

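/*
 * Illustrative sketch, kept out of the build with #if 0: how a UDP-style
 * recvmsg() decides between a plain copy and the copy-and-checksum variant
 * above.  The helper name is hypothetical, and the caller is assumed to
 * have verified that the iovec is large enough, as required above.
 */
#if 0
static int example_copy_to_user(struct sk_buff *skb, struct msghdr *msg,
				int hlen, int copied)
{
	if (skb_csum_unnecessary(skb))
		/* Hardware (or an earlier pass) already validated it. */
		return skb_copy_datagram_iovec(skb, hlen, msg->msg_iov,
					       copied);

	/* Checksum while copying so the data is only walked once. */
	return skb_copy_and_csum_datagram_iovec(skb, hlen, msg->msg_iov);
}
#endif
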
/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
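
/*
 * Illustrative sketch, kept out of the build with #if 0: most datagram
 * protocols simply point their proto_ops .poll at datagram_poll.  The
 * address family shown and the elided operations are placeholders.
 */
#if 0
static const struct proto_ops example_dgram_ops = {
	.family		= AF_UNSPEC,		/* placeholder family */
	.owner		= THIS_MODULE,
	.poll		= datagram_poll,	/* generic datagram poll */
	/* ... remaining operations elided ... */
};
#endif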