/*
 * net/ipv4/raw.c — from mirror_ubuntu-bionic-kernel.git (git.proxmox.com)
 * commit: [RAW]: Introduce raw_hashinfo structure
 */
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * RAW - implementation of IP "raw" sockets.
7 *
8 * Version: $Id: raw.c,v 1.64 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 *
13 * Fixes:
14 * Alan Cox : verify_area() fixed up
15 * Alan Cox : ICMP error handling
16 * Alan Cox : EMSGSIZE if you send too big a packet
17 * Alan Cox : Now uses generic datagrams and shared
18 * skbuff library. No more peek crashes,
19 * no more backlogs
20 * Alan Cox : Checks sk->broadcast.
21 * Alan Cox : Uses skb_free_datagram/skb_copy_datagram
22 * Alan Cox : Raw passes ip options too
23 * Alan Cox : Setsocketopt added
24 * Alan Cox : Fixed error return for broadcasts
25 * Alan Cox : Removed wake_up calls
26 * Alan Cox : Use ttl/tos
27 * Alan Cox : Cleaned up old debugging
28 * Alan Cox : Use new kernel side addresses
29 * Arnt Gulbrandsen : Fixed MSG_DONTROUTE in raw sockets.
30 * Alan Cox : BSD style RAW socket demultiplexing.
31 * Alan Cox : Beginnings of mrouted support.
32 * Alan Cox : Added IP_HDRINCL option.
33 * Alan Cox : Skip broadcast check if BSDism set.
34 * David S. Miller : New socket lookup architecture.
35 *
36 * This program is free software; you can redistribute it and/or
37 * modify it under the terms of the GNU General Public License
38 * as published by the Free Software Foundation; either version
39 * 2 of the License, or (at your option) any later version.
40 */
41
42 #include <linux/types.h>
43 #include <asm/atomic.h>
44 #include <asm/byteorder.h>
45 #include <asm/current.h>
46 #include <asm/uaccess.h>
47 #include <asm/ioctls.h>
48 #include <linux/stddef.h>
49 #include <linux/slab.h>
50 #include <linux/errno.h>
51 #include <linux/aio.h>
52 #include <linux/kernel.h>
53 #include <linux/spinlock.h>
54 #include <linux/sockios.h>
55 #include <linux/socket.h>
56 #include <linux/in.h>
57 #include <linux/mroute.h>
58 #include <linux/netdevice.h>
59 #include <linux/in_route.h>
60 #include <linux/route.h>
61 #include <linux/skbuff.h>
62 #include <net/net_namespace.h>
63 #include <net/dst.h>
64 #include <net/sock.h>
65 #include <linux/gfp.h>
66 #include <linux/ip.h>
67 #include <linux/net.h>
68 #include <net/ip.h>
69 #include <net/icmp.h>
70 #include <net/udp.h>
71 #include <net/raw.h>
72 #include <net/snmp.h>
73 #include <net/tcp_states.h>
74 #include <net/inet_common.h>
75 #include <net/checksum.h>
76 #include <net/xfrm.h>
77 #include <linux/rtnetlink.h>
78 #include <linux/proc_fs.h>
79 #include <linux/seq_file.h>
80 #include <linux/netfilter.h>
81 #include <linux/netfilter_ipv4.h>
82
/* Single hash table over all IPv4 raw sockets, keyed by IP protocol
 * number.  Chains are protected by the embedded rwlock: receive-path
 * walkers take it for read, hash/unhash take it for write. */
static struct raw_hashinfo raw_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(),
};
86
/* Insert sk into the raw hash table.  The chain index is the socket's
 * protocol number (inet->num) masked to the table size, so all raw
 * sockets for one protocol share a chain. */
static void raw_v4_hash(struct sock *sk)
{
	struct hlist_head *head = &raw_v4_hashinfo.ht[inet_sk(sk)->num &
						      (RAW_HTABLE_SIZE - 1)];

	write_lock_bh(&raw_v4_hashinfo.lock);
	sk_add_node(sk, head);
	sock_prot_inc_use(sk->sk_prot);	/* protocol "inuse" accounting */
	write_unlock_bh(&raw_v4_hashinfo.lock);
}
97
/* Remove sk from the raw hash table.  sk_del_node_init() returns false
 * when the socket was not hashed, in which case the use count must not
 * be decremented. */
static void raw_v4_unhash(struct sock *sk)
{
	write_lock_bh(&raw_v4_hashinfo.lock);
	if (sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(&raw_v4_hashinfo.lock);
}
105
106 static struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
107 __be32 raddr, __be32 laddr,
108 int dif)
109 {
110 struct hlist_node *node;
111
112 sk_for_each_from(sk, node) {
113 struct inet_sock *inet = inet_sk(sk);
114
115 if (inet->num == num &&
116 !(inet->daddr && inet->daddr != raddr) &&
117 !(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
118 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
119 goto found; /* gotcha */
120 }
121 sk = NULL;
122 found:
123 return sk;
124 }
125
126 /*
127 * 0 - deliver
128 * 1 - block
129 */
130 static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
131 {
132 int type;
133
134 if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
135 return 1;
136
137 type = icmp_hdr(skb)->type;
138 if (type < 32) {
139 __u32 data = raw_sk(sk)->filter.data;
140
141 return ((1 << type) & data) != 0;
142 }
143
144 /* Do not block unknown ICMP types */
145 return 0;
146 }
147
148 /* IP input processing comes here for RAW socket delivery.
149 * Caller owns SKB, so we must make clones.
150 *
151 * RFC 1122: SHOULD pass TOS value up to the transport layer.
152 * -> It does. And not only TOS, but all IP header.
153 */
/* Deliver skb to every raw socket on hash chain 'hash' that matches the
 * packet (protocol, addresses, incoming device).  The caller keeps
 * ownership of skb; each receiving socket gets its own clone.  ICMP
 * packets are additionally run through the per-socket type filter.
 *
 * Returns 1 if at least one socket matched (even if cloning failed),
 * 0 if none did.
 *
 * NOTE(review): iph is dereferenced again after icmp_filter(); if that
 * filter pulls/reallocates skb data (pskb_may_pull), iph could be left
 * dangling — confirm header stability across the filter call.
 */
static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
{
	struct sock *sk;
	struct hlist_head *head;
	int delivered = 0;

	read_lock(&raw_v4_hashinfo.lock);
	head = &raw_v4_hashinfo.ht[hash];
	if (hlist_empty(head))
		goto out;
	sk = __raw_v4_lookup(__sk_head(head), iph->protocol,
			     iph->saddr, iph->daddr,
			     skb->dev->ifindex);

	while (sk) {
		delivered = 1;
		if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				raw_rcv(sk, clone);
		}
		/* Continue the walk from the socket after this one. */
		sk = __raw_v4_lookup(sk_next(sk), iph->protocol,
				     iph->saddr, iph->daddr,
				     skb->dev->ifindex);
	}
out:
	read_unlock(&raw_v4_hashinfo.lock);
	return delivered;
}
185
186 int raw_local_deliver(struct sk_buff *skb, int protocol)
187 {
188 int hash;
189 struct sock *raw_sk;
190
191 hash = protocol & (RAW_HTABLE_SIZE - 1);
192 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
193
194 /* If there maybe a raw socket we must check - if not we
195 * don't care less
196 */
197 if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
198 raw_sk = NULL;
199
200 return raw_sk != NULL;
201
202 }
203
/* Handle an ICMP error concerning a packet this raw socket sent:
 * translate the ICMP type/code into an errno, optionally queue the
 * full error for MSG_ERRQUEUE readers, and raise sk_err for hard
 * errors.  Note errno values are stored positive in sk_err, per the
 * usual kernel convention. */
static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int err = 0;
	int harderr = 0;

	/* Report error on raw socket, if:
	   1. User requested ip_recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without ip_recverr and error is hard.
	 */
	if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	switch (type) {
	default:		/* unknown types share the TTL-expiry handling */
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		return;		/* deliberately ignored */
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		err = EHOSTUNREACH;
		if (code > NR_ICMP_UNREACH)
			break;
		err = icmp_err_convert[code].errno;
		harderr = icmp_err_convert[code].fatal;
		if (code == ICMP_FRAG_NEEDED) {
			/* PMTU event: hard only when PMTU discovery is on */
			harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
			err = EMSGSIZE;
		}
	}

	if (inet->recverr) {
		/* Queue detailed error for MSG_ERRQUEUE.  The offending
		 * payload starts past the quoted IP header, except for
		 * hdrincl sockets, where the user treats the header as
		 * part of the data. */
		struct iphdr *iph = (struct iphdr*)skb->data;
		u8 *payload = skb->data + (iph->ihl << 2);

		if (inet->hdrincl)
			payload = skb->data;
		ip_icmp_error(sk, skb, err, 0, info, payload);
	}

	if (inet->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}
257
/* Fan an ICMP error out to every matching raw socket for 'protocol'.
 * skb->data points at the copy of the IP header we originally sent,
 * quoted inside the ICMP payload — hence daddr/saddr are passed to the
 * lookup swapped relative to the receive path (our old destination is
 * the lookup's remote address). */
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
	int hash;
	struct sock *raw_sk;
	struct iphdr *iph;

	hash = protocol & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v4_hashinfo.lock);
	raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
	if (raw_sk != NULL) {
		iph = (struct iphdr *)skb->data;
		while ((raw_sk = __raw_v4_lookup(raw_sk, protocol, iph->daddr,
						 iph->saddr,
						 skb->dev->ifindex)) != NULL) {
			raw_err(raw_sk, skb, info);
			raw_sk = sk_next(raw_sk);	/* resume after this socket */
			iph = (struct iphdr *)skb->data;
		}
	}
	read_unlock(&raw_v4_hashinfo.lock);
}
280
281 static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
282 {
283 /* Charge it to the socket. */
284
285 if (sock_queue_rcv_skb(sk, skb) < 0) {
286 atomic_inc(&sk->sk_drops);
287 kfree_skb(skb);
288 return NET_RX_DROP;
289 }
290
291 return NET_RX_SUCCESS;
292 }
293
/* Final delivery of one skb (typically a clone made by raw_v4_input())
 * to a raw socket: run the IPsec input policy, clear netfilter state,
 * then push skb->data back to the network header so the user receives
 * the full IP header.  Queue rejection is accounted in sk_drops but
 * still returns 0 here. */
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	nf_reset(skb);

	/* Re-expose the IP header: data currently points past it. */
	skb_push(skb, skb->data - skb_network_header(skb));

	raw_rcv_skb(sk, skb);
	return 0;
}
308
/* IP_HDRINCL transmit: the user supplied the complete IP header in the
 * iovec.  Build one linear skb, copy header+payload from userspace,
 * patch the fields the kernel owns (saddr, tot_len, id, checksum) when
 * the header looks valid, then hand the packet to netfilter and
 * dst_output().  Raw sockets never fragment on behalf of the user. */
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
			   struct rtable *rt,
			   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int hh_len;
	struct iphdr *iph;
	struct sk_buff *skb;
	unsigned int iphlen;
	int err;

	if (length > rt->u.dst.dev->mtu) {
		/* Too big for the device: report local PMTU error. */
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
			       rt->u.dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;	/* probe only: nothing to transmit */

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	skb = sock_alloc_send_skb(sk, length+hh_len+15,
				  flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hh_len);	/* headroom for link-layer header */

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;	/* user owns all checksums */

	skb->transport_header = skb->network_header;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	/* We don't modify invalid header */
	iphlen = iph->ihl * 4;
	if (iphlen >= sizeof(*iph) && iphlen <= length) {
		if (!iph->saddr)
			iph->saddr = rt->rt_src;	/* fill source from route */
		iph->check = 0;
		iph->tot_len = htons(length);
		if (!iph->id)
			ip_select_ident(iph, &rt->u.dst, NULL);

		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}
	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(((struct icmphdr *)
				skb_transport_header(skb))->type);

	err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		      dst_output);
	if (err > 0)
		err = inet->recverr ? net_xmit_errno(err) : 0;
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
382
/* For non-hdrincl ICMP sends, peek the ICMP type (payload byte 0) and
 * code (byte 1) out of the user iovec so they can be placed in the flow
 * key for routing / policy matching; the two bytes may be split across
 * iovec entries.  For any other protocol there is nothing to probe.
 * Always returns 0 unless the user memory was unreadable (-EFAULT).
 *
 * NOTE(review): the bytes are fetched from user memory here and fetched
 * again later when the payload is copied for transmit; userspace can
 * change them in between, so the flow key may not describe the packet
 * actually sent.  Later kernels restructured this to read the header
 * once — verify whether that matters for this tree.
 */
static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
{
	struct iovec *iov;
	u8 __user *type = NULL;
	u8 __user *code = NULL;
	int probed = 0;
	unsigned int i;

	if (!msg->msg_iov)
		return 0;

	for (i = 0; i < msg->msg_iovlen; i++) {
		iov = &msg->msg_iov[i];
		if (!iov)
			continue;

		switch (fl->proto) {
		case IPPROTO_ICMP:
			/* check if one-byte field is readable or not. */
			if (iov->iov_base && iov->iov_len < 1)
				break;

			if (!type) {
				type = iov->iov_base;
				/* check if code field is readable or not. */
				if (iov->iov_len > 1)
					code = type + 1;
			} else if (!code)
				code = iov->iov_base;

			if (type && code) {
				if (get_user(fl->fl_icmp_type, type) ||
				    get_user(fl->fl_icmp_code, code))
					return -EFAULT;
				probed = 1;
			}
			break;
		default:
			probed = 1;	/* nothing to extract */
			break;
		}
		if (probed)
			break;
	}
	return 0;
}
429
430 static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
431 size_t len)
432 {
433 struct inet_sock *inet = inet_sk(sk);
434 struct ipcm_cookie ipc;
435 struct rtable *rt = NULL;
436 int free = 0;
437 __be32 daddr;
438 __be32 saddr;
439 u8 tos;
440 int err;
441
442 err = -EMSGSIZE;
443 if (len > 0xFFFF)
444 goto out;
445
446 /*
447 * Check the flags.
448 */
449
450 err = -EOPNOTSUPP;
451 if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */
452 goto out; /* compatibility */
453
454 /*
455 * Get and verify the address.
456 */
457
458 if (msg->msg_namelen) {
459 struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name;
460 err = -EINVAL;
461 if (msg->msg_namelen < sizeof(*usin))
462 goto out;
463 if (usin->sin_family != AF_INET) {
464 static int complained;
465 if (!complained++)
466 printk(KERN_INFO "%s forgot to set AF_INET in "
467 "raw sendmsg. Fix it!\n",
468 current->comm);
469 err = -EAFNOSUPPORT;
470 if (usin->sin_family)
471 goto out;
472 }
473 daddr = usin->sin_addr.s_addr;
474 /* ANK: I did not forget to get protocol from port field.
475 * I just do not know, who uses this weirdness.
476 * IP_HDRINCL is much more convenient.
477 */
478 } else {
479 err = -EDESTADDRREQ;
480 if (sk->sk_state != TCP_ESTABLISHED)
481 goto out;
482 daddr = inet->daddr;
483 }
484
485 ipc.addr = inet->saddr;
486 ipc.opt = NULL;
487 ipc.oif = sk->sk_bound_dev_if;
488
489 if (msg->msg_controllen) {
490 err = ip_cmsg_send(msg, &ipc);
491 if (err)
492 goto out;
493 if (ipc.opt)
494 free = 1;
495 }
496
497 saddr = ipc.addr;
498 ipc.addr = daddr;
499
500 if (!ipc.opt)
501 ipc.opt = inet->opt;
502
503 if (ipc.opt) {
504 err = -EINVAL;
505 /* Linux does not mangle headers on raw sockets,
506 * so that IP options + IP_HDRINCL is non-sense.
507 */
508 if (inet->hdrincl)
509 goto done;
510 if (ipc.opt->srr) {
511 if (!daddr)
512 goto done;
513 daddr = ipc.opt->faddr;
514 }
515 }
516 tos = RT_CONN_FLAGS(sk);
517 if (msg->msg_flags & MSG_DONTROUTE)
518 tos |= RTO_ONLINK;
519
520 if (MULTICAST(daddr)) {
521 if (!ipc.oif)
522 ipc.oif = inet->mc_index;
523 if (!saddr)
524 saddr = inet->mc_addr;
525 }
526
527 {
528 struct flowi fl = { .oif = ipc.oif,
529 .nl_u = { .ip4_u =
530 { .daddr = daddr,
531 .saddr = saddr,
532 .tos = tos } },
533 .proto = inet->hdrincl ? IPPROTO_RAW :
534 sk->sk_protocol,
535 };
536 if (!inet->hdrincl) {
537 err = raw_probe_proto_opt(&fl, msg);
538 if (err)
539 goto done;
540 }
541
542 security_sk_classify_flow(sk, &fl);
543 err = ip_route_output_flow(&rt, &fl, sk, 1);
544 }
545 if (err)
546 goto done;
547
548 err = -EACCES;
549 if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
550 goto done;
551
552 if (msg->msg_flags & MSG_CONFIRM)
553 goto do_confirm;
554 back_from_confirm:
555
556 if (inet->hdrincl)
557 err = raw_send_hdrinc(sk, msg->msg_iov, len,
558 rt, msg->msg_flags);
559
560 else {
561 if (!ipc.addr)
562 ipc.addr = rt->rt_dst;
563 lock_sock(sk);
564 err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
565 &ipc, rt, msg->msg_flags);
566 if (err)
567 ip_flush_pending_frames(sk);
568 else if (!(msg->msg_flags & MSG_MORE))
569 err = ip_push_pending_frames(sk);
570 release_sock(sk);
571 }
572 done:
573 if (free)
574 kfree(ipc.opt);
575 ip_rt_put(rt);
576
577 out:
578 if (err < 0)
579 return err;
580 return len;
581
582 do_confirm:
583 dst_confirm(&rt->u.dst);
584 if (!(msg->msg_flags & MSG_PROBE) || len)
585 goto back_from_confirm;
586 err = 0;
587 goto done;
588 }
589
/* close() on a raw socket: drop any IP_ROUTER_ALERT registration first
 * (it holds a direct kernel reference to the socket), then release. */
static void raw_close(struct sock *sk, long timeout)
{
	/*
	 * Raw sockets may have direct kernel references. Kill them.
	 */
	ip_ra_control(sk, 0, NULL);

	sk_common_release(sk);
}
599
600 /* This gets rid of all the nasties in af_inet. -DaveM */
601 static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
602 {
603 struct inet_sock *inet = inet_sk(sk);
604 struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
605 int ret = -EINVAL;
606 int chk_addr_ret;
607
608 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
609 goto out;
610 chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
611 ret = -EADDRNOTAVAIL;
612 if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
613 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
614 goto out;
615 inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
616 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
617 inet->saddr = 0; /* Use device */
618 sk_dst_reset(sk);
619 ret = 0;
620 out: return ret;
621 }
622
623 /*
624 * This should be easy, if there is something there
625 * we return it, otherwise we block.
626 */
627
/*
 * Receive one queued datagram on a raw socket.
 *
 * Queued skbs still carry the full IP header (raw sockets deliver it to
 * the user).  A short user buffer truncates the copy and sets
 * MSG_TRUNC; when MSG_TRUNC was requested by the caller, the full
 * datagram length is returned instead of the copied length.
 */
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	size_t copied = 0;
	int err = -EOPNOTSUPP;
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;

	if (flags & MSG_OOB)
		goto out;

	/* Tell the caller how much address space we will fill in. */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE) {
		err = ip_recv_error(sk, msg, len);
		goto out;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sin->sin_port = 0;	/* raw sockets have no ports */
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;	/* report real datagram size */
done:
	skb_free_datagram(sk, skb);
out:
	if (err)
		return err;
	return copied;
}
682
683 static int raw_init(struct sock *sk)
684 {
685 struct raw_sock *rp = raw_sk(sk);
686
687 if (inet_sk(sk)->num == IPPROTO_ICMP)
688 memset(&rp->filter, 0, sizeof(rp->filter));
689 return 0;
690 }
691
/* ICMP_FILTER setsockopt: copy the user's type bitmask over the socket
 * filter.  Oversized input is silently truncated to the filter size.
 * NOTE(review): optlen is signed; a negative value wraps in the
 * unsigned comparison against sizeof() and ends up clamped to the full
 * filter size rather than rejected — consider -EINVAL for optlen < 0.
 */
static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
{
	if (optlen > sizeof(struct icmp_filter))
		optlen = sizeof(struct icmp_filter);
	if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
		return -EFAULT;
	return 0;
}
700
701 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
702 {
703 int len, ret = -EFAULT;
704
705 if (get_user(len, optlen))
706 goto out;
707 ret = -EINVAL;
708 if (len < 0)
709 goto out;
710 if (len > sizeof(struct icmp_filter))
711 len = sizeof(struct icmp_filter);
712 ret = -EFAULT;
713 if (put_user(len, optlen) ||
714 copy_to_user(optval, &raw_sk(sk)->filter, len))
715 goto out;
716 ret = 0;
717 out: return ret;
718 }
719
720 static int do_raw_setsockopt(struct sock *sk, int level, int optname,
721 char __user *optval, int optlen)
722 {
723 if (optname == ICMP_FILTER) {
724 if (inet_sk(sk)->num != IPPROTO_ICMP)
725 return -EOPNOTSUPP;
726 else
727 return raw_seticmpfilter(sk, optval, optlen);
728 }
729 return -ENOPROTOOPT;
730 }
731
732 static int raw_setsockopt(struct sock *sk, int level, int optname,
733 char __user *optval, int optlen)
734 {
735 if (level != SOL_RAW)
736 return ip_setsockopt(sk, level, optname, optval, optlen);
737 return do_raw_setsockopt(sk, level, optname, optval, optlen);
738 }
739
740 #ifdef CONFIG_COMPAT
741 static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
742 char __user *optval, int optlen)
743 {
744 if (level != SOL_RAW)
745 return compat_ip_setsockopt(sk, level, optname, optval, optlen);
746 return do_raw_setsockopt(sk, level, optname, optval, optlen);
747 }
748 #endif
749
750 static int do_raw_getsockopt(struct sock *sk, int level, int optname,
751 char __user *optval, int __user *optlen)
752 {
753 if (optname == ICMP_FILTER) {
754 if (inet_sk(sk)->num != IPPROTO_ICMP)
755 return -EOPNOTSUPP;
756 else
757 return raw_geticmpfilter(sk, optval, optlen);
758 }
759 return -ENOPROTOOPT;
760 }
761
762 static int raw_getsockopt(struct sock *sk, int level, int optname,
763 char __user *optval, int __user *optlen)
764 {
765 if (level != SOL_RAW)
766 return ip_getsockopt(sk, level, optname, optval, optlen);
767 return do_raw_getsockopt(sk, level, optname, optval, optlen);
768 }
769
770 #ifdef CONFIG_COMPAT
771 static int compat_raw_getsockopt(struct sock *sk, int level, int optname,
772 char __user *optval, int __user *optlen)
773 {
774 if (level != SOL_RAW)
775 return compat_ip_getsockopt(sk, level, optname, optval, optlen);
776 return do_raw_getsockopt(sk, level, optname, optval, optlen);
777 }
778 #endif
779
780 static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
781 {
782 switch (cmd) {
783 case SIOCOUTQ: {
784 int amount = atomic_read(&sk->sk_wmem_alloc);
785 return put_user(amount, (int __user *)arg);
786 }
787 case SIOCINQ: {
788 struct sk_buff *skb;
789 int amount = 0;
790
791 spin_lock_bh(&sk->sk_receive_queue.lock);
792 skb = skb_peek(&sk->sk_receive_queue);
793 if (skb != NULL)
794 amount = skb->len;
795 spin_unlock_bh(&sk->sk_receive_queue.lock);
796 return put_user(amount, (int __user *)arg);
797 }
798
799 default:
800 #ifdef CONFIG_IP_MROUTE
801 return ipmr_ioctl(sk, cmd, (void __user *)arg);
802 #else
803 return -ENOIOCTLCMD;
804 #endif
805 }
806 }
807
DEFINE_PROTO_INUSE(raw)

/* Protocol operations for IPv4 SOCK_RAW sockets.  disconnect reuses
 * udp_disconnect (both are connectionless datagram sockets) and
 * backlog_rcv re-delivers packets queued while the socket was locked. */
struct proto raw_prot = {
	.name = "RAW",
	.owner = THIS_MODULE,
	.close = raw_close,
	.connect = ip4_datagram_connect,
	.disconnect = udp_disconnect,
	.ioctl = raw_ioctl,
	.init = raw_init,
	.setsockopt = raw_setsockopt,
	.getsockopt = raw_getsockopt,
	.sendmsg = raw_sendmsg,
	.recvmsg = raw_recvmsg,
	.bind = raw_bind,
	.backlog_rcv = raw_rcv_skb,
	.hash = raw_v4_hash,
	.unhash = raw_v4_unhash,
	.obj_size = sizeof(struct raw_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_raw_setsockopt,
	.compat_getsockopt = compat_raw_getsockopt,
#endif
	REF_PROTO_INUSE(raw)
};
833
834 #ifdef CONFIG_PROC_FS
/* /proc/net/raw iterator cursor: the hash bucket currently walked. */
struct raw_iter_state {
	int bucket;
};

/* seq->private is allocated by seq_open_private() in raw_seq_open(). */
#define raw_seq_private(seq) ((struct raw_iter_state *)(seq)->private)
840
841 static struct sock *raw_get_first(struct seq_file *seq)
842 {
843 struct sock *sk;
844 struct raw_iter_state* state = raw_seq_private(seq);
845
846 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
847 ++state->bucket) {
848 struct hlist_node *node;
849
850 sk_for_each(sk, node, &raw_v4_hashinfo.ht[state->bucket])
851 if (sk->sk_family == PF_INET)
852 goto found;
853 }
854 sk = NULL;
855 found:
856 return sk;
857 }
858
859 static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
860 {
861 struct raw_iter_state* state = raw_seq_private(seq);
862
863 do {
864 sk = sk_next(sk);
865 try_again:
866 ;
867 } while (sk && sk->sk_family != PF_INET);
868
869 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
870 sk = sk_head(&raw_v4_hashinfo.ht[state->bucket]);
871 goto try_again;
872 }
873 return sk;
874 }
875
876 static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
877 {
878 struct sock *sk = raw_get_first(seq);
879
880 if (sk)
881 while (pos && (sk = raw_get_next(seq, sk)) != NULL)
882 --pos;
883 return pos ? NULL : sk;
884 }
885
/* seq_file start: take the table read lock (held until raw_seq_stop())
 * and return SEQ_START_TOKEN at position 0 so show() prints the header
 * line first. */
static void *raw_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&raw_v4_hashinfo.lock);
	return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
891
892 static void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
893 {
894 struct sock *sk;
895
896 if (v == SEQ_START_TOKEN)
897 sk = raw_get_first(seq);
898 else
899 sk = raw_get_next(seq, v);
900 ++*pos;
901 return sk;
902 }
903
/* seq_file stop: release the read lock taken in raw_seq_start(). */
static void raw_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&raw_v4_hashinfo.lock);
}
908
/* Format one socket as a /proc/net/raw line into tmpbuf (returned).
 * The local "port" column carries the protocol number — raw sockets
 * have no ports — and the remote port, timer and retransmit columns
 * are fixed zeros kept for layout compatibility with tcp/udp. */
static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->daddr,
	       src = inet->rcv_saddr;
	__u16 destp = 0,
	      srcp = inet->num;

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d",
		i, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
	return tmpbuf;
}
926
/* Upper bound on one formatted /proc/net/raw line. */
#define TMPSZ 128

/* seq_file show: print the column header for the SEQ_START_TOKEN,
 * otherwise one formatted socket line. */
static int raw_seq_show(struct seq_file *seq, void *v)
{
	char tmpbuf[TMPSZ+1];

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-*s\n", TMPSZ-1,
			   " sl local_address rem_address st tx_queue "
			   "rx_queue tr tm->when retrnsmt uid timeout "
			   "inode drops");
	else {
		struct raw_iter_state *state = raw_seq_private(seq);

		seq_printf(seq, "%-*s\n", TMPSZ-1,
			   get_raw_sock(v, tmpbuf, state->bucket));
	}
	return 0;
}
946
/* seq_file iterator over the raw socket hash table (/proc/net/raw). */
static const struct seq_operations raw_seq_ops = {
	.start = raw_seq_start,
	.next = raw_seq_next,
	.stop = raw_seq_stop,
	.show = raw_seq_show,
};
953
/* open(): allocate per-reader iterator state (current bucket) and
 * attach the raw seq operations. */
static int raw_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &raw_seq_ops,
			sizeof(struct raw_iter_state));
}
959
/* File operations for /proc/net/raw; release frees the private
 * iterator state allocated in raw_seq_open(). */
static const struct file_operations raw_seq_fops = {
	.owner = THIS_MODULE,
	.open = raw_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
967
968 int __init raw_proc_init(void)
969 {
970 if (!proc_net_fops_create(&init_net, "raw", S_IRUGO, &raw_seq_fops))
971 return -ENOMEM;
972 return 0;
973 }
974
/* Remove /proc/net/raw.
 * NOTE(review): tagged __init rather than __exit — presumably only
 * reachable from init-failure unwinding; confirm against callers. */
void __init raw_proc_exit(void)
{
	proc_net_remove(&init_net, "raw");
}
979 #endif /* CONFIG_PROC_FS */