net/ipv4/ip_sockglue.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * The IP to API glue.
7 *
8 * Authors: see ip.c
9 *
10 * Fixes:
11 * Many : Split from ip.c, see ip.c for history.
12 * Martin Mares : TOS setting fixed.
13 * Alan Cox : Fixed a couple of oopses in Martin's
14 * TOS tweaks.
15 * Mike McLagan : Routing by source
16 */
17
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/mm.h>
21 #include <linux/skbuff.h>
22 #include <linux/ip.h>
23 #include <linux/icmp.h>
24 #include <linux/inetdevice.h>
25 #include <linux/netdevice.h>
26 #include <linux/slab.h>
27 #include <net/sock.h>
28 #include <net/ip.h>
29 #include <net/icmp.h>
30 #include <net/tcp_states.h>
31 #include <linux/udp.h>
32 #include <linux/igmp.h>
33 #include <linux/netfilter.h>
34 #include <linux/route.h>
35 #include <linux/mroute.h>
36 #include <net/inet_ecn.h>
37 #include <net/route.h>
38 #include <net/xfrm.h>
39 #include <net/compat.h>
40 #if IS_ENABLED(CONFIG_IPV6)
41 #include <net/transp_v6.h>
42 #endif
43 #include <net/ip_fib.h>
44
45 #include <linux/errqueue.h>
46 #include <asm/uaccess.h>
47
48 #define IP_CMSG_PKTINFO 1
49 #define IP_CMSG_TTL 2
50 #define IP_CMSG_TOS 4
51 #define IP_CMSG_RECVOPTS 8
52 #define IP_CMSG_RETOPTS 16
53 #define IP_CMSG_PASSSEC 32
54 #define IP_CMSG_ORIGDSTADDR 64
55
56 /*
57 * SOL_IP control messages.
58 */
59
60 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
61 {
62 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
63
64 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
65
66 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
67 }
68
69 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
70 {
71 int ttl = ip_hdr(skb)->ttl;
72 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
73 }
74
75 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
76 {
77 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
78 }
79
80 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
81 {
82 if (IPCB(skb)->opt.optlen == 0)
83 return;
84
85 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
86 ip_hdr(skb) + 1);
87 }
88
89
90 static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
91 {
92 unsigned char optbuf[sizeof(struct ip_options) + 40];
93 struct ip_options *opt = (struct ip_options *)optbuf;
94
95 if (IPCB(skb)->opt.optlen == 0)
96 return;
97
98 if (ip_options_echo(opt, skb)) {
99 msg->msg_flags |= MSG_CTRUNC;
100 return;
101 }
102 ip_options_undo(opt);
103
104 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
105 }
106
107 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
108 {
109 char *secdata;
110 u32 seclen, secid;
111 int err;
112
113 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
114 if (err)
115 return;
116
117 err = security_secid_to_secctx(secid, &secdata, &seclen);
118 if (err)
119 return;
120
121 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
122 security_release_secctx(secdata, seclen);
123 }
124
125 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
126 {
127 struct sockaddr_in sin;
128 const struct iphdr *iph = ip_hdr(skb);
129 __be16 *ports = (__be16 *)skb_transport_header(skb);
130
131 if (skb_transport_offset(skb) + 4 > skb->len)
132 return;
133
134 /* All current transport protocols have the port numbers in the
135 * first four bytes of the transport header, and this function is
136 * written with that assumption in mind.
137 */
138
139 sin.sin_family = AF_INET;
140 sin.sin_addr.s_addr = iph->daddr;
141 sin.sin_port = ports[1];
142 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
143
144 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
145 }
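/*
 * Illustrative userspace sketch (not part of this file): once
 * IP_RECVORIGDSTADDR is enabled -- typically together with IP_TRANSPARENT
 * in a UDP transparent proxy -- the sockaddr_in built above arrives as an
 * IP_ORIGDSTADDR control message. fd and msg are assumed to be an already
 * bound UDP socket and a msghdr set up with a control buffer.
 *
 *	int on = 1;
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_IP &&
 *		    cmsg->cmsg_type == IP_ORIGDSTADDR) {
 *			struct sockaddr_in *orig =
 *				(struct sockaddr_in *)CMSG_DATA(cmsg);
 *			struct in_addr odst = orig->sin_addr;
 *		}
 *	}
 */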
146
147 void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
148 {
149 struct inet_sock *inet = inet_sk(skb->sk);
150 unsigned int flags = inet->cmsg_flags;
151
152 /* Ordered by supposed usage frequency */
153 if (flags & 1)
154 ip_cmsg_recv_pktinfo(msg, skb);
155 if ((flags >>= 1) == 0)
156 return;
157
158 if (flags & 1)
159 ip_cmsg_recv_ttl(msg, skb);
160 if ((flags >>= 1) == 0)
161 return;
162
163 if (flags & 1)
164 ip_cmsg_recv_tos(msg, skb);
165 if ((flags >>= 1) == 0)
166 return;
167
168 if (flags & 1)
169 ip_cmsg_recv_opts(msg, skb);
170 if ((flags >>= 1) == 0)
171 return;
172
173 if (flags & 1)
174 ip_cmsg_recv_retopts(msg, skb);
175 if ((flags >>= 1) == 0)
176 return;
177
178 if (flags & 1)
179 ip_cmsg_recv_security(msg, skb);
180
181 if ((flags >>= 1) == 0)
182 return;
183 if (flags & 1)
184 ip_cmsg_recv_dstaddr(msg, skb);
185
186 }
187 EXPORT_SYMBOL(ip_cmsg_recv);
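/*
 * Illustrative userspace sketch (not part of this file): enabling two of
 * the cmsg flags dispatched above and reading the resulting control
 * messages. This is the body of a small test program; the usual socket
 * headers (<sys/socket.h>, <netinet/in.h>) are assumed, and the port
 * number and buffer sizes are arbitrary choices.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int on = 1, ttl = -1;
 *	char data[2048], cbuf[256];
 *	struct sockaddr_in bind_addr = { .sin_family = AF_INET,
 *					 .sin_port = htons(5000) };
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_IP, IP_PKTINFO, &on, sizeof(on));
 *	setsockopt(fd, SOL_IP, IP_RECVTTL, &on, sizeof(on));
 *	bind(fd, (struct sockaddr *)&bind_addr, sizeof(bind_addr));
 *	recvmsg(fd, &msg, 0);
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level != SOL_IP)
 *			continue;
 *		if (cmsg->cmsg_type == IP_PKTINFO) {
 *			struct in_pktinfo *pi =
 *				(struct in_pktinfo *)CMSG_DATA(cmsg);
 *			int ifindex = pi->ipi_ifindex;
 *		} else if (cmsg->cmsg_type == IP_TTL) {
 *			ttl = *(int *)CMSG_DATA(cmsg);
 *		}
 *	}
 */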
188
189 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
190 bool allow_ipv6)
191 {
192 int err, val;
193 struct cmsghdr *cmsg;
194
195 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
196 if (!CMSG_OK(msg, cmsg))
197 return -EINVAL;
198 #if IS_ENABLED(CONFIG_IPV6)
199 if (allow_ipv6 &&
200 cmsg->cmsg_level == SOL_IPV6 &&
201 cmsg->cmsg_type == IPV6_PKTINFO) {
202 struct in6_pktinfo *src_info;
203
204 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
205 return -EINVAL;
206 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
207 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
208 return -EINVAL;
209 ipc->oif = src_info->ipi6_ifindex;
210 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
211 continue;
212 }
213 #endif
214 if (cmsg->cmsg_level != SOL_IP)
215 continue;
216 switch (cmsg->cmsg_type) {
217 case IP_RETOPTS:
218 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
219 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
220 err < 40 ? err : 40);
221 if (err)
222 return err;
223 break;
224 case IP_PKTINFO:
225 {
226 struct in_pktinfo *info;
227 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
228 return -EINVAL;
229 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
230 ipc->oif = info->ipi_ifindex;
231 ipc->addr = info->ipi_spec_dst.s_addr;
232 break;
233 }
234 case IP_TTL:
235 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
236 return -EINVAL;
237 val = *(int *)CMSG_DATA(cmsg);
238 if (val < 1 || val > 255)
239 return -EINVAL;
240 ipc->ttl = val;
241 break;
242 case IP_TOS:
243 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
244 return -EINVAL;
245 val = *(int *)CMSG_DATA(cmsg);
246 if (val < 0 || val > 255)
247 return -EINVAL;
248 ipc->tos = val;
249 ipc->priority = rt_tos2priority(ipc->tos);
250 break;
251
252 default:
253 return -EINVAL;
254 }
255 }
256 return 0;
257 }
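/*
 * Illustrative userspace sketch (not part of this file): the send-side
 * counterpart parsed by ip_cmsg_send() above -- attaching an IP_TOS control
 * message to a single sendmsg() call. The destination 192.0.2.1, port 5000
 * and TOS value 0x10 are only examples; the usual socket and inet headers
 * are assumed.
 *
 *	struct sockaddr_in dst = { .sin_family = AF_INET,
 *				   .sin_port = htons(5000),
 *				   .sin_addr.s_addr = inet_addr("192.0.2.1") };
 *	char payload[] = "hello";
 *	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	int tos = 0x10;
 *	struct msghdr msg = { .msg_name = &dst, .msg_namelen = sizeof(dst),
 *			      .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_IP;
 *	cmsg->cmsg_type = IP_TOS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(tos));
 *	memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos));
 *	sendmsg(fd, &msg, 0);
 */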
258
259
260 /* Special input handler for packets caught by the router alert option.
261    They are selected only by protocol field and then processed like
262    local ones, but only if someone wants them!  Otherwise, a router
263    not running rsvpd would kill RSVP.
264
265    What user level does with them is its own problem.
266    I have no idea how it would masquerade or NAT them (joke, joke :-)),
267    but the receiver should be clever enough to e.g. forward mtrace requests
268    sent to a multicast group so they reach the destination's designated router.
269  */
270 struct ip_ra_chain __rcu *ip_ra_chain;
271 static DEFINE_SPINLOCK(ip_ra_lock);
272
273
274 static void ip_ra_destroy_rcu(struct rcu_head *head)
275 {
276 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
277
278 sock_put(ra->saved_sk);
279 kfree(ra);
280 }
281
282 int ip_ra_control(struct sock *sk, unsigned char on,
283 void (*destructor)(struct sock *))
284 {
285 struct ip_ra_chain *ra, *new_ra;
286 struct ip_ra_chain __rcu **rap;
287
288 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
289 return -EINVAL;
290
291 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
292
293 spin_lock_bh(&ip_ra_lock);
294 for (rap = &ip_ra_chain;
295 (ra = rcu_dereference_protected(*rap,
296 lockdep_is_held(&ip_ra_lock))) != NULL;
297 rap = &ra->next) {
298 if (ra->sk == sk) {
299 if (on) {
300 spin_unlock_bh(&ip_ra_lock);
301 kfree(new_ra);
302 return -EADDRINUSE;
303 }
304 /* don't let ip_call_ra_chain() use sk again */
305 ra->sk = NULL;
306 RCU_INIT_POINTER(*rap, ra->next);
307 spin_unlock_bh(&ip_ra_lock);
308
309 if (ra->destructor)
310 ra->destructor(sk);
311 /*
312 * Delay sock_put(sk) and kfree(ra) until after one RCU grace
313 * period. This guarantees ip_call_ra_chain() doesn't need
314 * to mess with socket refcounts.
315 */
316 ra->saved_sk = sk;
317 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
318 return 0;
319 }
320 }
321 if (new_ra == NULL) {
322 spin_unlock_bh(&ip_ra_lock);
323 return -ENOBUFS;
324 }
325 new_ra->sk = sk;
326 new_ra->destructor = destructor;
327
328 RCU_INIT_POINTER(new_ra->next, ra);
329 rcu_assign_pointer(*rap, new_ra);
330 sock_hold(sk);
331 spin_unlock_bh(&ip_ra_lock);
332
333 return 0;
334 }
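/*
 * Illustrative userspace sketch (not part of this file): how a daemon such
 * as rsvpd would register here. ip_ra_control() requires a raw socket whose
 * protocol is not IPPROTO_RAW; IPPROTO_RSVP is just one example. After the
 * setsockopt(), packets carrying the IP router alert option for that
 * protocol are handed to the socket through ip_call_ra_chain().
 *
 *	int on = 1;
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *
 *	setsockopt(fd, SOL_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 */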
335
336 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
337 __be16 port, u32 info, u8 *payload)
338 {
339 struct sock_exterr_skb *serr;
340
341 skb = skb_clone(skb, GFP_ATOMIC);
342 if (!skb)
343 return;
344
345 serr = SKB_EXT_ERR(skb);
346 serr->ee.ee_errno = err;
347 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
348 serr->ee.ee_type = icmp_hdr(skb)->type;
349 serr->ee.ee_code = icmp_hdr(skb)->code;
350 serr->ee.ee_pad = 0;
351 serr->ee.ee_info = info;
352 serr->ee.ee_data = 0;
353 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
354 skb_network_header(skb);
355 serr->port = port;
356
357 if (skb_pull(skb, payload - skb->data) != NULL) {
358 skb_reset_transport_header(skb);
359 if (sock_queue_err_skb(sk, skb) == 0)
360 return;
361 }
362 kfree_skb(skb);
363 }
364
365 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
366 {
367 struct inet_sock *inet = inet_sk(sk);
368 struct sock_exterr_skb *serr;
369 struct iphdr *iph;
370 struct sk_buff *skb;
371
372 if (!inet->recverr)
373 return;
374
375 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
376 if (!skb)
377 return;
378
379 skb_put(skb, sizeof(struct iphdr));
380 skb_reset_network_header(skb);
381 iph = ip_hdr(skb);
382 iph->daddr = daddr;
383
384 serr = SKB_EXT_ERR(skb);
385 serr->ee.ee_errno = err;
386 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
387 serr->ee.ee_type = 0;
388 serr->ee.ee_code = 0;
389 serr->ee.ee_pad = 0;
390 serr->ee.ee_info = info;
391 serr->ee.ee_data = 0;
392 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
393 serr->port = port;
394
395 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
396 skb_reset_transport_header(skb);
397
398 if (sock_queue_err_skb(sk, skb))
399 kfree_skb(skb);
400 }
401
402 /*
403 * Handle MSG_ERRQUEUE
404 */
405 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
406 {
407 struct sock_exterr_skb *serr;
408 struct sk_buff *skb;
409 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
410 struct {
411 struct sock_extended_err ee;
412 struct sockaddr_in offender;
413 } errhdr;
414 int err;
415 int copied;
416
417 err = -EAGAIN;
418 skb = sock_dequeue_err_skb(sk);
419 if (skb == NULL)
420 goto out;
421
422 copied = skb->len;
423 if (copied > len) {
424 msg->msg_flags |= MSG_TRUNC;
425 copied = len;
426 }
427 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
428 if (err)
429 goto out_free_skb;
430
431 sock_recv_timestamp(msg, sk, skb);
432
433 serr = SKB_EXT_ERR(skb);
434
435 if (sin) {
436 sin->sin_family = AF_INET;
437 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
438 serr->addr_offset);
439 sin->sin_port = serr->port;
440 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
441 *addr_len = sizeof(*sin);
442 }
443
444 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
445 sin = &errhdr.offender;
446 sin->sin_family = AF_UNSPEC;
447 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
448 struct inet_sock *inet = inet_sk(sk);
449
450 sin->sin_family = AF_INET;
451 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
452 sin->sin_port = 0;
453 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
454 if (inet->cmsg_flags)
455 ip_cmsg_recv(msg, skb);
456 }
457
458 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
459
460 /* Now we could try to dump the offending packet's options */
461
462 msg->msg_flags |= MSG_ERRQUEUE;
463 err = copied;
464
465 out_free_skb:
466 kfree_skb(skb);
467 out:
468 return err;
469 }
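/*
 * Illustrative userspace sketch (not part of this file): draining the error
 * queue that ip_icmp_error()/ip_local_error() feed, after IP_RECVERR has
 * been enabled on fd. struct sock_extended_err comes from
 * <linux/errqueue.h>; buffer sizes are arbitrary.
 *
 *	char data[1500], cbuf[512];
 *	int on = 1;
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0)
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == SOL_IP &&
 *			    cmsg->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err *ee =
 *					(struct sock_extended_err *)CMSG_DATA(cmsg);
 *				int cause = ee->ee_errno;
 *			}
 */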
470
471
472 /*
473 * Socket option code for IP. This is the end of the line after any
474 * TCP, UDP, etc. options on an IP socket.
475 */
476
477 static int do_ip_setsockopt(struct sock *sk, int level,
478 int optname, char __user *optval, unsigned int optlen)
479 {
480 struct inet_sock *inet = inet_sk(sk);
481 int val = 0, err;
482
483 switch (optname) {
484 case IP_PKTINFO:
485 case IP_RECVTTL:
486 case IP_RECVOPTS:
487 case IP_RECVTOS:
488 case IP_RETOPTS:
489 case IP_TOS:
490 case IP_TTL:
491 case IP_HDRINCL:
492 case IP_MTU_DISCOVER:
493 case IP_RECVERR:
494 case IP_ROUTER_ALERT:
495 case IP_FREEBIND:
496 case IP_PASSSEC:
497 case IP_TRANSPARENT:
498 case IP_MINTTL:
499 case IP_NODEFRAG:
500 case IP_UNICAST_IF:
501 case IP_MULTICAST_TTL:
502 case IP_MULTICAST_ALL:
503 case IP_MULTICAST_LOOP:
504 case IP_RECVORIGDSTADDR:
505 if (optlen >= sizeof(int)) {
506 if (get_user(val, (int __user *) optval))
507 return -EFAULT;
508 } else if (optlen >= sizeof(char)) {
509 unsigned char ucval;
510
511 if (get_user(ucval, (unsigned char __user *) optval))
512 return -EFAULT;
513 val = (int) ucval;
514 }
515 }
516
517 /* If optlen==0, it is equivalent to val == 0 */
518
519 if (ip_mroute_opt(optname))
520 return ip_mroute_setsockopt(sk, optname, optval, optlen);
521
522 err = 0;
523 lock_sock(sk);
524
525 switch (optname) {
526 case IP_OPTIONS:
527 {
528 struct ip_options_rcu *old, *opt = NULL;
529
530 if (optlen > 40)
531 goto e_inval;
532 err = ip_options_get_from_user(sock_net(sk), &opt,
533 optval, optlen);
534 if (err)
535 break;
536 old = rcu_dereference_protected(inet->inet_opt,
537 sock_owned_by_user(sk));
538 if (inet->is_icsk) {
539 struct inet_connection_sock *icsk = inet_csk(sk);
540 #if IS_ENABLED(CONFIG_IPV6)
541 if (sk->sk_family == PF_INET ||
542 (!((1 << sk->sk_state) &
543 (TCPF_LISTEN | TCPF_CLOSE)) &&
544 inet->inet_daddr != LOOPBACK4_IPV6)) {
545 #endif
546 if (old)
547 icsk->icsk_ext_hdr_len -= old->opt.optlen;
548 if (opt)
549 icsk->icsk_ext_hdr_len += opt->opt.optlen;
550 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
551 #if IS_ENABLED(CONFIG_IPV6)
552 }
553 #endif
554 }
555 rcu_assign_pointer(inet->inet_opt, opt);
556 if (old)
557 kfree_rcu(old, rcu);
558 break;
559 }
560 case IP_PKTINFO:
561 if (val)
562 inet->cmsg_flags |= IP_CMSG_PKTINFO;
563 else
564 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
565 break;
566 case IP_RECVTTL:
567 if (val)
568 inet->cmsg_flags |= IP_CMSG_TTL;
569 else
570 inet->cmsg_flags &= ~IP_CMSG_TTL;
571 break;
572 case IP_RECVTOS:
573 if (val)
574 inet->cmsg_flags |= IP_CMSG_TOS;
575 else
576 inet->cmsg_flags &= ~IP_CMSG_TOS;
577 break;
578 case IP_RECVOPTS:
579 if (val)
580 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
581 else
582 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
583 break;
584 case IP_RETOPTS:
585 if (val)
586 inet->cmsg_flags |= IP_CMSG_RETOPTS;
587 else
588 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
589 break;
590 case IP_PASSSEC:
591 if (val)
592 inet->cmsg_flags |= IP_CMSG_PASSSEC;
593 else
594 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
595 break;
596 case IP_RECVORIGDSTADDR:
597 if (val)
598 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
599 else
600 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
601 break;
602 case IP_TOS: /* This sets both TOS and Precedence */
603 if (sk->sk_type == SOCK_STREAM) {
604 val &= ~INET_ECN_MASK;
605 val |= inet->tos & INET_ECN_MASK;
606 }
607 if (inet->tos != val) {
608 inet->tos = val;
609 sk->sk_priority = rt_tos2priority(val);
610 sk_dst_reset(sk);
611 }
612 break;
613 case IP_TTL:
614 if (optlen < 1)
615 goto e_inval;
616 if (val != -1 && (val < 1 || val > 255))
617 goto e_inval;
618 inet->uc_ttl = val;
619 break;
620 case IP_HDRINCL:
621 if (sk->sk_type != SOCK_RAW) {
622 err = -ENOPROTOOPT;
623 break;
624 }
625 inet->hdrincl = val ? 1 : 0;
626 break;
627 case IP_NODEFRAG:
628 if (sk->sk_type != SOCK_RAW) {
629 err = -ENOPROTOOPT;
630 break;
631 }
632 inet->nodefrag = val ? 1 : 0;
633 break;
634 case IP_MTU_DISCOVER:
635 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
636 goto e_inval;
637 inet->pmtudisc = val;
638 break;
639 case IP_RECVERR:
640 inet->recverr = !!val;
641 if (!val)
642 skb_queue_purge(&sk->sk_error_queue);
643 break;
644 case IP_MULTICAST_TTL:
645 if (sk->sk_type == SOCK_STREAM)
646 goto e_inval;
647 if (optlen < 1)
648 goto e_inval;
649 if (val == -1)
650 val = 1;
651 if (val < 0 || val > 255)
652 goto e_inval;
653 inet->mc_ttl = val;
654 break;
655 case IP_MULTICAST_LOOP:
656 if (optlen < 1)
657 goto e_inval;
658 inet->mc_loop = !!val;
659 break;
660 case IP_UNICAST_IF:
661 {
662 struct net_device *dev = NULL;
663 int ifindex;
664
665 if (optlen != sizeof(int))
666 goto e_inval;
667
668 ifindex = (__force int)ntohl((__force __be32)val);
669 if (ifindex == 0) {
670 inet->uc_index = 0;
671 err = 0;
672 break;
673 }
674
675 dev = dev_get_by_index(sock_net(sk), ifindex);
676 err = -EADDRNOTAVAIL;
677 if (!dev)
678 break;
679 dev_put(dev);
680
681 err = -EINVAL;
682 if (sk->sk_bound_dev_if)
683 break;
684
685 inet->uc_index = ifindex;
686 err = 0;
687 break;
688 }
689 case IP_MULTICAST_IF:
690 {
691 struct ip_mreqn mreq;
692 struct net_device *dev = NULL;
693
694 if (sk->sk_type == SOCK_STREAM)
695 goto e_inval;
696 /*
697 * Check the arguments are allowable
698 */
699
700 if (optlen < sizeof(struct in_addr))
701 goto e_inval;
702
703 err = -EFAULT;
704 if (optlen >= sizeof(struct ip_mreqn)) {
705 if (copy_from_user(&mreq, optval, sizeof(mreq)))
706 break;
707 } else {
708 memset(&mreq, 0, sizeof(mreq));
709 if (optlen >= sizeof(struct ip_mreq)) {
710 if (copy_from_user(&mreq, optval,
711 sizeof(struct ip_mreq)))
712 break;
713 } else if (optlen >= sizeof(struct in_addr)) {
714 if (copy_from_user(&mreq.imr_address, optval,
715 sizeof(struct in_addr)))
716 break;
717 }
718 }
719
720 if (!mreq.imr_ifindex) {
721 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
722 inet->mc_index = 0;
723 inet->mc_addr = 0;
724 err = 0;
725 break;
726 }
727 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
728 if (dev)
729 mreq.imr_ifindex = dev->ifindex;
730 } else
731 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
732
733
734 err = -EADDRNOTAVAIL;
735 if (!dev)
736 break;
737 dev_put(dev);
738
739 err = -EINVAL;
740 if (sk->sk_bound_dev_if &&
741 mreq.imr_ifindex != sk->sk_bound_dev_if)
742 break;
743
744 inet->mc_index = mreq.imr_ifindex;
745 inet->mc_addr = mreq.imr_address.s_addr;
746 err = 0;
747 break;
748 }
749
750 case IP_ADD_MEMBERSHIP:
751 case IP_DROP_MEMBERSHIP:
752 {
753 struct ip_mreqn mreq;
754
755 err = -EPROTO;
756 if (inet_sk(sk)->is_icsk)
757 break;
758
759 if (optlen < sizeof(struct ip_mreq))
760 goto e_inval;
761 err = -EFAULT;
762 if (optlen >= sizeof(struct ip_mreqn)) {
763 if (copy_from_user(&mreq, optval, sizeof(mreq)))
764 break;
765 } else {
766 memset(&mreq, 0, sizeof(mreq));
767 if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
768 break;
769 }
770
771 if (optname == IP_ADD_MEMBERSHIP)
772 err = ip_mc_join_group(sk, &mreq);
773 else
774 err = ip_mc_leave_group(sk, &mreq);
775 break;
776 }
777 case IP_MSFILTER:
778 {
779 struct ip_msfilter *msf;
780
781 if (optlen < IP_MSFILTER_SIZE(0))
782 goto e_inval;
783 if (optlen > sysctl_optmem_max) {
784 err = -ENOBUFS;
785 break;
786 }
787 msf = kmalloc(optlen, GFP_KERNEL);
788 if (!msf) {
789 err = -ENOBUFS;
790 break;
791 }
792 err = -EFAULT;
793 if (copy_from_user(msf, optval, optlen)) {
794 kfree(msf);
795 break;
796 }
797 /* numsrc >= (1G-4) overflow in 32 bits */
798 if (msf->imsf_numsrc >= 0x3ffffffcU ||
799 msf->imsf_numsrc > sysctl_igmp_max_msf) {
800 kfree(msf);
801 err = -ENOBUFS;
802 break;
803 }
804 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
805 kfree(msf);
806 err = -EINVAL;
807 break;
808 }
809 err = ip_mc_msfilter(sk, msf, 0);
810 kfree(msf);
811 break;
812 }
813 case IP_BLOCK_SOURCE:
814 case IP_UNBLOCK_SOURCE:
815 case IP_ADD_SOURCE_MEMBERSHIP:
816 case IP_DROP_SOURCE_MEMBERSHIP:
817 {
818 struct ip_mreq_source mreqs;
819 int omode, add;
820
821 if (optlen != sizeof(struct ip_mreq_source))
822 goto e_inval;
823 if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
824 err = -EFAULT;
825 break;
826 }
827 if (optname == IP_BLOCK_SOURCE) {
828 omode = MCAST_EXCLUDE;
829 add = 1;
830 } else if (optname == IP_UNBLOCK_SOURCE) {
831 omode = MCAST_EXCLUDE;
832 add = 0;
833 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
834 struct ip_mreqn mreq;
835
836 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
837 mreq.imr_address.s_addr = mreqs.imr_interface;
838 mreq.imr_ifindex = 0;
839 err = ip_mc_join_group(sk, &mreq);
840 if (err && err != -EADDRINUSE)
841 break;
842 omode = MCAST_INCLUDE;
843 add = 1;
844 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
845 omode = MCAST_INCLUDE;
846 add = 0;
847 }
848 err = ip_mc_source(add, omode, sk, &mreqs, 0);
849 break;
850 }
851 case MCAST_JOIN_GROUP:
852 case MCAST_LEAVE_GROUP:
853 {
854 struct group_req greq;
855 struct sockaddr_in *psin;
856 struct ip_mreqn mreq;
857
858 if (optlen < sizeof(struct group_req))
859 goto e_inval;
860 err = -EFAULT;
861 if (copy_from_user(&greq, optval, sizeof(greq)))
862 break;
863 psin = (struct sockaddr_in *)&greq.gr_group;
864 if (psin->sin_family != AF_INET)
865 goto e_inval;
866 memset(&mreq, 0, sizeof(mreq));
867 mreq.imr_multiaddr = psin->sin_addr;
868 mreq.imr_ifindex = greq.gr_interface;
869
870 if (optname == MCAST_JOIN_GROUP)
871 err = ip_mc_join_group(sk, &mreq);
872 else
873 err = ip_mc_leave_group(sk, &mreq);
874 break;
875 }
876 case MCAST_JOIN_SOURCE_GROUP:
877 case MCAST_LEAVE_SOURCE_GROUP:
878 case MCAST_BLOCK_SOURCE:
879 case MCAST_UNBLOCK_SOURCE:
880 {
881 struct group_source_req greqs;
882 struct ip_mreq_source mreqs;
883 struct sockaddr_in *psin;
884 int omode, add;
885
886 if (optlen != sizeof(struct group_source_req))
887 goto e_inval;
888 if (copy_from_user(&greqs, optval, sizeof(greqs))) {
889 err = -EFAULT;
890 break;
891 }
892 if (greqs.gsr_group.ss_family != AF_INET ||
893 greqs.gsr_source.ss_family != AF_INET) {
894 err = -EADDRNOTAVAIL;
895 break;
896 }
897 psin = (struct sockaddr_in *)&greqs.gsr_group;
898 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
899 psin = (struct sockaddr_in *)&greqs.gsr_source;
900 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
901 mreqs.imr_interface = 0; /* use index for mc_source */
902
903 if (optname == MCAST_BLOCK_SOURCE) {
904 omode = MCAST_EXCLUDE;
905 add = 1;
906 } else if (optname == MCAST_UNBLOCK_SOURCE) {
907 omode = MCAST_EXCLUDE;
908 add = 0;
909 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
910 struct ip_mreqn mreq;
911
912 psin = (struct sockaddr_in *)&greqs.gsr_group;
913 mreq.imr_multiaddr = psin->sin_addr;
914 mreq.imr_address.s_addr = 0;
915 mreq.imr_ifindex = greqs.gsr_interface;
916 err = ip_mc_join_group(sk, &mreq);
917 if (err && err != -EADDRINUSE)
918 break;
919 greqs.gsr_interface = mreq.imr_ifindex;
920 omode = MCAST_INCLUDE;
921 add = 1;
922 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
923 omode = MCAST_INCLUDE;
924 add = 0;
925 }
926 err = ip_mc_source(add, omode, sk, &mreqs,
927 greqs.gsr_interface);
928 break;
929 }
930 case MCAST_MSFILTER:
931 {
932 struct sockaddr_in *psin;
933 struct ip_msfilter *msf = NULL;
934 struct group_filter *gsf = NULL;
935 int msize, i, ifindex;
936
937 if (optlen < GROUP_FILTER_SIZE(0))
938 goto e_inval;
939 if (optlen > sysctl_optmem_max) {
940 err = -ENOBUFS;
941 break;
942 }
943 gsf = kmalloc(optlen, GFP_KERNEL);
944 if (!gsf) {
945 err = -ENOBUFS;
946 break;
947 }
948 err = -EFAULT;
949 if (copy_from_user(gsf, optval, optlen))
950 goto mc_msf_out;
951
952 /* numsrc >= (4G-140)/128 overflow in 32 bits */
953 if (gsf->gf_numsrc >= 0x1ffffff ||
954 gsf->gf_numsrc > sysctl_igmp_max_msf) {
955 err = -ENOBUFS;
956 goto mc_msf_out;
957 }
958 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
959 err = -EINVAL;
960 goto mc_msf_out;
961 }
962 msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
963 msf = kmalloc(msize, GFP_KERNEL);
964 if (!msf) {
965 err = -ENOBUFS;
966 goto mc_msf_out;
967 }
968 ifindex = gsf->gf_interface;
969 psin = (struct sockaddr_in *)&gsf->gf_group;
970 if (psin->sin_family != AF_INET) {
971 err = -EADDRNOTAVAIL;
972 goto mc_msf_out;
973 }
974 msf->imsf_multiaddr = psin->sin_addr.s_addr;
975 msf->imsf_interface = 0;
976 msf->imsf_fmode = gsf->gf_fmode;
977 msf->imsf_numsrc = gsf->gf_numsrc;
978 err = -EADDRNOTAVAIL;
979 for (i = 0; i < gsf->gf_numsrc; ++i) {
980 psin = (struct sockaddr_in *)&gsf->gf_slist[i];
981
982 if (psin->sin_family != AF_INET)
983 goto mc_msf_out;
984 msf->imsf_slist[i] = psin->sin_addr.s_addr;
985 }
986 kfree(gsf);
987 gsf = NULL;
988
989 err = ip_mc_msfilter(sk, msf, ifindex);
990 mc_msf_out:
991 kfree(msf);
992 kfree(gsf);
993 break;
994 }
995 case IP_MULTICAST_ALL:
996 if (optlen < 1)
997 goto e_inval;
998 if (val != 0 && val != 1)
999 goto e_inval;
1000 inet->mc_all = val;
1001 break;
1002 case IP_ROUTER_ALERT:
1003 err = ip_ra_control(sk, val ? 1 : 0, NULL);
1004 break;
1005
1006 case IP_FREEBIND:
1007 if (optlen < 1)
1008 goto e_inval;
1009 inet->freebind = !!val;
1010 break;
1011
1012 case IP_IPSEC_POLICY:
1013 case IP_XFRM_POLICY:
1014 err = -EPERM;
1015 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1016 break;
1017 err = xfrm_user_policy(sk, optname, optval, optlen);
1018 break;
1019
1020 case IP_TRANSPARENT:
1021 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1022 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1023 err = -EPERM;
1024 break;
1025 }
1026 if (optlen < 1)
1027 goto e_inval;
1028 inet->transparent = !!val;
1029 break;
1030
1031 case IP_MINTTL:
1032 if (optlen < 1)
1033 goto e_inval;
1034 if (val < 0 || val > 255)
1035 goto e_inval;
1036 inet->min_ttl = val;
1037 break;
1038
1039 default:
1040 err = -ENOPROTOOPT;
1041 break;
1042 }
1043 release_sock(sk);
1044 return err;
1045
1046 e_inval:
1047 release_sock(sk);
1048 return -EINVAL;
1049 }
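/*
 * Illustrative userspace sketch (not part of this file): exercising the
 * IP_ADD_MEMBERSHIP case above. The group 239.1.2.3 is an arbitrary example
 * address; leaving imr_ifindex and imr_address at zero lets the kernel pick
 * the interface from the routing table.
 *
 *	struct ip_mreqn mreq = { 0 };
 *
 *	mreq.imr_multiaddr.s_addr = inet_addr("239.1.2.3");
 *	mreq.imr_address.s_addr = htonl(INADDR_ANY);
 *	mreq.imr_ifindex = 0;
 *	setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */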
1050
1051 /**
1052 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1053 * @sk: socket
1054 * @skb: buffer
1055 *
1056 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
1057 * destination in skb->cb[] before the dst is dropped.
1058 * This way, the receiver doesn't take cache line misses to read the rtable.
1059 */
1060 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1061 {
1062 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1063 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1064 ipv6_sk_rxinfo(sk);
1065
1066 if (prepare && skb_rtable(skb)) {
1067 pktinfo->ipi_ifindex = inet_iif(skb);
1068 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1069 } else {
1070 pktinfo->ipi_ifindex = 0;
1071 pktinfo->ipi_spec_dst.s_addr = 0;
1072 }
1073 skb_dst_drop(skb);
1074 }
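/*
 * Sketch of the expected call site (simplified, not copied from this file):
 * a receive path is meant to stash the pktinfo and drop the dst right
 * before queueing the skb to the socket, roughly:
 *
 *	ipv4_pktinfo_prepare(sk, skb);
 *	rc = sock_queue_rcv_skb(sk, skb);
 */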
1075
1076 int ip_setsockopt(struct sock *sk, int level,
1077 int optname, char __user *optval, unsigned int optlen)
1078 {
1079 int err;
1080
1081 if (level != SOL_IP)
1082 return -ENOPROTOOPT;
1083
1084 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1085 #ifdef CONFIG_NETFILTER
1086 /* we need to exclude all possible ENOPROTOOPTs except default case */
1087 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1088 optname != IP_IPSEC_POLICY &&
1089 optname != IP_XFRM_POLICY &&
1090 !ip_mroute_opt(optname)) {
1091 lock_sock(sk);
1092 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1093 release_sock(sk);
1094 }
1095 #endif
1096 return err;
1097 }
1098 EXPORT_SYMBOL(ip_setsockopt);
1099
1100 #ifdef CONFIG_COMPAT
1101 int compat_ip_setsockopt(struct sock *sk, int level, int optname,
1102 char __user *optval, unsigned int optlen)
1103 {
1104 int err;
1105
1106 if (level != SOL_IP)
1107 return -ENOPROTOOPT;
1108
1109 if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
1110 return compat_mc_setsockopt(sk, level, optname, optval, optlen,
1111 ip_setsockopt);
1112
1113 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1114 #ifdef CONFIG_NETFILTER
1115 /* we need to exclude all possible ENOPROTOOPTs except default case */
1116 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1117 optname != IP_IPSEC_POLICY &&
1118 optname != IP_XFRM_POLICY &&
1119 !ip_mroute_opt(optname)) {
1120 lock_sock(sk);
1121 err = compat_nf_setsockopt(sk, PF_INET, optname,
1122 optval, optlen);
1123 release_sock(sk);
1124 }
1125 #endif
1126 return err;
1127 }
1128 EXPORT_SYMBOL(compat_ip_setsockopt);
1129 #endif
1130
1131 /*
1132 * Get the options. Note for future reference: the GET of IP options gets
1133 * the _received_ ones; the SET sets the _sent_ ones.
1134 */
1135
1136 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1137 char __user *optval, int __user *optlen, unsigned int flags)
1138 {
1139 struct inet_sock *inet = inet_sk(sk);
1140 int val;
1141 int len;
1142
1143 if (level != SOL_IP)
1144 return -EOPNOTSUPP;
1145
1146 if (ip_mroute_opt(optname))
1147 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1148
1149 if (get_user(len, optlen))
1150 return -EFAULT;
1151 if (len < 0)
1152 return -EINVAL;
1153
1154 lock_sock(sk);
1155
1156 switch (optname) {
1157 case IP_OPTIONS:
1158 {
1159 unsigned char optbuf[sizeof(struct ip_options)+40];
1160 struct ip_options *opt = (struct ip_options *)optbuf;
1161 struct ip_options_rcu *inet_opt;
1162
1163 inet_opt = rcu_dereference_protected(inet->inet_opt,
1164 sock_owned_by_user(sk));
1165 opt->optlen = 0;
1166 if (inet_opt)
1167 memcpy(optbuf, &inet_opt->opt,
1168 sizeof(struct ip_options) +
1169 inet_opt->opt.optlen);
1170 release_sock(sk);
1171
1172 if (opt->optlen == 0)
1173 return put_user(0, optlen);
1174
1175 ip_options_undo(opt);
1176
1177 len = min_t(unsigned int, len, opt->optlen);
1178 if (put_user(len, optlen))
1179 return -EFAULT;
1180 if (copy_to_user(optval, opt->__data, len))
1181 return -EFAULT;
1182 return 0;
1183 }
1184 case IP_PKTINFO:
1185 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1186 break;
1187 case IP_RECVTTL:
1188 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1189 break;
1190 case IP_RECVTOS:
1191 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1192 break;
1193 case IP_RECVOPTS:
1194 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1195 break;
1196 case IP_RETOPTS:
1197 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1198 break;
1199 case IP_PASSSEC:
1200 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1201 break;
1202 case IP_RECVORIGDSTADDR:
1203 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1204 break;
1205 case IP_TOS:
1206 val = inet->tos;
1207 break;
1208 case IP_TTL:
1209 val = (inet->uc_ttl == -1 ?
1210 sysctl_ip_default_ttl :
1211 inet->uc_ttl);
1212 break;
1213 case IP_HDRINCL:
1214 val = inet->hdrincl;
1215 break;
1216 case IP_NODEFRAG:
1217 val = inet->nodefrag;
1218 break;
1219 case IP_MTU_DISCOVER:
1220 val = inet->pmtudisc;
1221 break;
1222 case IP_MTU:
1223 {
1224 struct dst_entry *dst;
1225 val = 0;
1226 dst = sk_dst_get(sk);
1227 if (dst) {
1228 val = dst_mtu(dst);
1229 dst_release(dst);
1230 }
1231 if (!val) {
1232 release_sock(sk);
1233 return -ENOTCONN;
1234 }
1235 break;
1236 }
1237 case IP_RECVERR:
1238 val = inet->recverr;
1239 break;
1240 case IP_MULTICAST_TTL:
1241 val = inet->mc_ttl;
1242 break;
1243 case IP_MULTICAST_LOOP:
1244 val = inet->mc_loop;
1245 break;
1246 case IP_UNICAST_IF:
1247 val = (__force int)htonl((__u32) inet->uc_index);
1248 break;
1249 case IP_MULTICAST_IF:
1250 {
1251 struct in_addr addr;
1252 len = min_t(unsigned int, len, sizeof(struct in_addr));
1253 addr.s_addr = inet->mc_addr;
1254 release_sock(sk);
1255
1256 if (put_user(len, optlen))
1257 return -EFAULT;
1258 if (copy_to_user(optval, &addr, len))
1259 return -EFAULT;
1260 return 0;
1261 }
1262 case IP_MSFILTER:
1263 {
1264 struct ip_msfilter msf;
1265 int err;
1266
1267 if (len < IP_MSFILTER_SIZE(0)) {
1268 release_sock(sk);
1269 return -EINVAL;
1270 }
1271 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
1272 release_sock(sk);
1273 return -EFAULT;
1274 }
1275 err = ip_mc_msfget(sk, &msf,
1276 (struct ip_msfilter __user *)optval, optlen);
1277 release_sock(sk);
1278 return err;
1279 }
1280 case MCAST_MSFILTER:
1281 {
1282 struct group_filter gsf;
1283 int err;
1284
1285 if (len < GROUP_FILTER_SIZE(0)) {
1286 release_sock(sk);
1287 return -EINVAL;
1288 }
1289 if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
1290 release_sock(sk);
1291 return -EFAULT;
1292 }
1293 err = ip_mc_gsfget(sk, &gsf,
1294 (struct group_filter __user *)optval,
1295 optlen);
1296 release_sock(sk);
1297 return err;
1298 }
1299 case IP_MULTICAST_ALL:
1300 val = inet->mc_all;
1301 break;
1302 case IP_PKTOPTIONS:
1303 {
1304 struct msghdr msg;
1305
1306 release_sock(sk);
1307
1308 if (sk->sk_type != SOCK_STREAM)
1309 return -ENOPROTOOPT;
1310
1311 msg.msg_control = (__force void *) optval;
1312 msg.msg_controllen = len;
1313 msg.msg_flags = flags;
1314
1315 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1316 struct in_pktinfo info;
1317
1318 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1319 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1320 info.ipi_ifindex = inet->mc_index;
1321 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1322 }
1323 if (inet->cmsg_flags & IP_CMSG_TTL) {
1324 int hlim = inet->mc_ttl;
1325 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1326 }
1327 if (inet->cmsg_flags & IP_CMSG_TOS) {
1328 int tos = inet->rcv_tos;
1329 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1330 }
1331 len -= msg.msg_controllen;
1332 return put_user(len, optlen);
1333 }
1334 case IP_FREEBIND:
1335 val = inet->freebind;
1336 break;
1337 case IP_TRANSPARENT:
1338 val = inet->transparent;
1339 break;
1340 case IP_MINTTL:
1341 val = inet->min_ttl;
1342 break;
1343 default:
1344 release_sock(sk);
1345 return -ENOPROTOOPT;
1346 }
1347 release_sock(sk);
1348
1349 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1350 unsigned char ucval = (unsigned char)val;
1351 len = 1;
1352 if (put_user(len, optlen))
1353 return -EFAULT;
1354 if (copy_to_user(optval, &ucval, 1))
1355 return -EFAULT;
1356 } else {
1357 len = min_t(unsigned int, sizeof(int), len);
1358 if (put_user(len, optlen))
1359 return -EFAULT;
1360 if (copy_to_user(optval, &val, len))
1361 return -EFAULT;
1362 }
1363 return 0;
1364 }
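/*
 * Illustrative userspace sketch (not part of this file): reading IP_MTU,
 * which per the code above only succeeds once the socket has a cached route
 * (e.g. after connect()) and otherwise fails with ENOTCONN. dst is assumed
 * to be a filled-in struct sockaddr_in.
 *
 *	int mtu;
 *	socklen_t len = sizeof(mtu);
 *
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *	if (getsockopt(fd, SOL_IP, IP_MTU, &mtu, &len) == 0)
 *		printf("path MTU: %d\n", mtu);
 */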
1365
1366 int ip_getsockopt(struct sock *sk, int level,
1367 int optname, char __user *optval, int __user *optlen)
1368 {
1369 int err;
1370
1371 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
1372 #ifdef CONFIG_NETFILTER
1373 /* we need to exclude all possible ENOPROTOOPTs except default case */
1374 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1375 !ip_mroute_opt(optname)) {
1376 int len;
1377
1378 if (get_user(len, optlen))
1379 return -EFAULT;
1380
1381 lock_sock(sk);
1382 err = nf_getsockopt(sk, PF_INET, optname, optval,
1383 &len);
1384 release_sock(sk);
1385 if (err >= 0)
1386 err = put_user(len, optlen);
1387 return err;
1388 }
1389 #endif
1390 return err;
1391 }
1392 EXPORT_SYMBOL(ip_getsockopt);
1393
1394 #ifdef CONFIG_COMPAT
1395 int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1396 char __user *optval, int __user *optlen)
1397 {
1398 int err;
1399
1400 if (optname == MCAST_MSFILTER)
1401 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1402 ip_getsockopt);
1403
1404 err = do_ip_getsockopt(sk, level, optname, optval, optlen,
1405 MSG_CMSG_COMPAT);
1406
1407 #ifdef CONFIG_NETFILTER
1408 /* we need to exclude all possible ENOPROTOOPTs except default case */
1409 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1410 !ip_mroute_opt(optname)) {
1411 int len;
1412
1413 if (get_user(len, optlen))
1414 return -EFAULT;
1415
1416 lock_sock(sk);
1417 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
1418 release_sock(sk);
1419 if (err >= 0)
1420 err = put_user(len, optlen);
1421 return err;
1422 }
1423 #endif
1424 return err;
1425 }
1426 EXPORT_SYMBOL(compat_ip_getsockopt);
1427 #endif