/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Version:	$Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

#define CONFIG_SOCK_PACKET	1
/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are plain silly.
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 an asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

In summary:
   If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */
/* List of all packet sockets. */
static HLIST_HEAD(packet_sklist);
static DEFINE_RWLOCK(packet_sklist_lock);

static atomic_t packet_socks_nr;
/* Private packet socket structures. */

struct packet_mclist
{
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max
{
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
#endif

static void packet_flush_mclist(struct sock *sk);
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
#ifdef CONFIG_PACKET_MMAP
	char *			*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;
	int			copy_thresh;
#endif
	struct packet_type	prot_hook;
	spinlock_t		bind_lock;
	unsigned int		running:1,	/* prot_hook is attached*/
				auxdata:1,
				origdev:1;
	int			ifindex;	/* bound device		*/
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
#ifdef CONFIG_PACKET_MMAP
	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;
#endif
};
struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
#ifdef CONFIG_PACKET_MMAP

static inline struct tpacket_hdr *packet_lookup_frame(struct packet_sock *po, unsigned int position)
{
	unsigned int pg_vec_pos, frame_offset;

	pg_vec_pos = position / po->frames_per_block;
	frame_offset = position % po->frames_per_block;

	return (struct tpacket_hdr *)(po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size));
}
#endif
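
/*
 * Illustration (user space): the same block/frame arithmetic as
 * packet_lookup_frame(), seen from the mmap()ed ring.  A minimal
 * sketch, assuming a tpacket_req that PACKET_RX_RING accepted;
 * "ring" (the address returned by mmap()) is a hypothetical name.
 *
 *	#include <linux/if_packet.h>
 *
 *	static struct tpacket_hdr *user_lookup_frame(char *ring,
 *					const struct tpacket_req *req,
 *					unsigned int position)
 *	{
 *		unsigned int fpb = req->tp_block_size / req->tp_frame_size;
 *		unsigned int block = position / fpb;
 *		unsigned int offset = position % fpb;
 *
 *		return (struct tpacket_hdr *)(ring
 *			+ block * req->tp_block_size
 *			+ offset * req->tp_frame_size);
 *	}
 */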
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
static void packet_sock_destruct(struct sock *sk)
{
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	atomic_dec(&packet_socks_nr);
#ifdef PACKET_REFCNT_DEBUG
	printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n",
	       sk, atomic_read(&packet_socks_nr));
#endif
}
static const struct proto_ops packet_ops;

#ifdef CONFIG_SOCK_PACKET
static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled; for outgoing ones
	 *	skb->data == skb_mac_header(skb), so this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto oom;

	/* drop any routing info */
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name(saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;

	err = -ENOBUFS;
	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user gets to
	 *	deal with the problem - do your own algorithmic backoffs. That's far
	 *	more flexible.
	 */

	if (skb == NULL)
		goto out_unlock;

	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	/* Try to align data part correctly */
	if (dev->hard_header) {
		skb->data -= dev->hard_header_len;
		skb->tail -= dev->hard_header_len;
		if (len < dev->hard_header_len)
			skb_reset_network_header(skb);
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	if (err)
		goto out_free;

	/*
	 *	Now send it
	 */

	dev_queue_xmit(skb);
	dev_put(dev);
	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
	return err;
}
#endif
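
/*
 * Example (user space): a minimal sketch of the legacy SOCK_PACKET
 * transmit path served by packet_sendmsg_spkt().  The "eth0" device
 * name is illustrative; the caller must supply a complete link-layer
 * frame, as the comment above demands.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int send_spkt(int fd, const void *frame, size_t len)
 *	{
 *		struct sockaddr_pkt spkt;
 *
 *		memset(&spkt, 0, sizeof(spkt));
 *		spkt.spkt_family = AF_PACKET;
 *		strncpy((char *)spkt.spkt_device, "eth0",
 *			sizeof(spkt.spkt_device));
 *		spkt.spkt_protocol = htons(ETH_P_ALL);
 *
 *		return sendto(fd, frame, len, 0,
 *			      (struct sockaddr *)&spkt, sizeof(spkt));
 *	}
 */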
static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = sk_run_filter(skb, filter->insns, filter->len);
	rcu_read_unlock_bh();

	return res;
}
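
/*
 * Example (user space): attaching the classic BPF filter that
 * run_filter() evaluates.  A minimal sketch; the one-instruction
 * program below accepts every packet with an unlimited snapshot
 * length (the returned value becomes the snaplen cap).
 *
 *	#include <sys/socket.h>
 *	#include <linux/filter.h>
 *
 *	static int attach_accept_all(int fd)
 *	{
 *		struct sock_filter insns[] = {
 *			BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *		};
 *		struct sock_fprog prog = {
 *			.len	= 1,
 *			.filter	= insns,
 *		};
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *				  &prog, sizeof(prog));
 *	}
 */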
/*
   This function makes lazy skb cloning in the hope that most packets
   are discarded by BPF.

   Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return the skb to its original state on
   exit, we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	skb->dev = dev;

	if (dev->hard_header) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides the details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;
}
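
/*
 * Example (user space): a minimal sketch of draining packets queued
 * by packet_rcv() and reading the link-layer info it stored in the
 * skb's control block.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_packet.h>
 *
 *	static void read_one(int fd)
 *	{
 *		unsigned char buf[2048];
 *		struct sockaddr_ll sll;
 *		socklen_t slen = sizeof(sll);
 *		ssize_t n;
 *
 *		n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &slen);
 *		if (n >= 0)
 *			printf("ifindex %d proto 0x%04x pkttype %d len %zd\n",
 *			       sll.sll_ifindex, ntohs(sll.sll_protocol),
 *			       sll.sll_pkttype, n);
 *	}
 */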
#ifdef CONFIG_PACKET_MMAP
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev->hard_header) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h = packet_lookup_frame(po, po->head);

	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head+1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, (u8 *)h + macoff, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	if (skb->tstamp.tv64 == 0) {
		__net_timestamp(skb);
		sock_enable_timestamp(sk);
	}
	tv = ktime_to_timeval(skb->tstamp);
	h->tp_sec = tv.tv_sec;
	h->tp_usec = tv.tv_usec;

	sll = (struct sockaddr_ll *)((u8 *)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	h->tp_status = status;
	smp_mb();

	{
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}
#endif
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		struct packet_sock *po = pkt_sk(sk);

		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	err = -EMSGSIZE;
	if (len > dev->mtu + reserve)
		goto out_unlock;

	skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	if (dev->hard_header) {
		int res;
		err = -EINVAL;
		res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (sock->type != SOCK_DGRAM) {
			skb_reset_tail_pointer(skb);
			skb->len = 0;
		} else if (res < 0)
			goto out_free;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
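
/*
 * Example (user space): a minimal sketch of addressing a transmit
 * through packet_sendmsg() on a SOCK_DGRAM packet socket, where the
 * kernel builds the link-layer header from sll_addr.  The destination
 * MAC and ifindex values are illustrative.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int send_dgram(int fd, int ifindex, const unsigned char dst[6],
 *		       const void *payload, size_t len)
 *	{
 *		struct sockaddr_ll sll;
 *
 *		memset(&sll, 0, sizeof(sll));
 *		sll.sll_family	 = AF_PACKET;
 *		sll.sll_ifindex	 = ifindex;
 *		sll.sll_protocol = htons(ETH_P_IP);
 *		sll.sll_halen	 = ETH_ALEN;
 *		memcpy(sll.sll_addr, dst, ETH_ALEN);
 *
 *		return sendto(fd, payload, len, 0,
 *			      (struct sockaddr *)&sll, sizeof(sll));
 *	}
 */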

/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;

	if (!sk)
		return 0;

	po = pkt_sk(sk);

	write_lock_bh(&packet_sklist_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&packet_sklist_lock);

	/*
	 *	Unhook packet receive handler.
	 */

	if (po->running) {
		/*
		 *	Remove the protocol hook
		 */
		dev_remove_pack(&po->prot_hook);
		po->running = 0;
		po->num = 0;
		__sock_put(sk);
	}

	packet_flush_mclist(sk);

#ifdef CONFIG_PACKET_MMAP
	{
		struct tpacket_req req;
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1);
	}
#endif

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);

	sock_put(sk);
	return 0;
}

/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	/*
	 *	Detach an existing hook if present.
	 */

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (dev) {
		if (dev->flags & IFF_UP) {
			dev_add_pack(&po->prot_hook);
			sock_hold(sk);
			po->running = 1;
		} else {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	} else {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}

/*
 *	Bind a packet socket to a device
 */

#ifdef CONFIG_SOCK_PACKET

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}
#endif

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);

out:
	return err;
}

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
#ifdef CONFIG_SOCK_PACKET
	    && sock->type != SOCK_PACKET
#endif
	    )
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;
#endif
	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	atomic_inc(&packet_socks_nr);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	po->prot_hook.func = packet_rcv;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;
#endif
	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	write_lock_bh(&packet_sklist_lock);
	sk_add_node(sk, &packet_sklist);
	write_unlock_bh(&packet_sklist_lock);
	return 0;
out:
	return err;
}
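
/*
 * Example (user space): a minimal sketch of the socket() call served
 * by packet_create().  CAP_NET_RAW is required, so this only works as
 * root or with that capability.
 *
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int open_packet_socket(void)
 *	{
 *		return socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	}
 */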

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN, if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */

	if (skb == NULL)
		goto out;

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);

		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = (flags & MSG_TRUNC) ? skb->len : copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
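
/*
 * Example (user space): a minimal sketch of reading the
 * PACKET_AUXDATA control message that packet_recvmsg() emits once
 * that option has been enabled with setsockopt().
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	extern void note_origlen(unsigned int len);	(hypothetical hook)
 *
 *	static void recv_with_aux(int fd)
 *	{
 *		unsigned char buf[2048];
 *		char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *		struct msghdr msg;
 *		struct cmsghdr *cmsg;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *
 *		if (recvmsg(fd, &msg, 0) < 0)
 *			return;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == SOL_PACKET &&
 *			    cmsg->cmsg_type == PACKET_AUXDATA) {
 *				struct tpacket_auxdata aux;
 *				memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
 *				note_origlen(aux.tp_len);
 *			}
 *		}
 *	}
 */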
#ifdef CONFIG_SOCK_PACKET
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	dev = dev_get_by_index(pkt_sk(sk)->ifindex);
	if (dev) {
		strlcpy(uaddr->sa_data, dev->name, 15);
		dev_put(dev);
	} else
		memset(uaddr->sa_data, 0, 14);
	*uaddr_len = sizeof(*uaddr);

	return 0;
}
#endif

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	dev = dev_get_by_index(po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
		dev_put(dev);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}

static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (what > 0)
			dev_mc_add(dev, i->addr, i->alen, 0);
		else
			dev_mc_delete(dev, i->addr, i->alen, 0);
		break;
	case PACKET_MR_PROMISC:
		dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		dev_set_allmulti(dev, what);
		break;
	default:;
	}
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	packet_dev_mc(dev, i, +1);

done:
	rtnl_unlock();
	return err;
}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = dev_get_by_index(ml->ifindex);
				if (dev) {
					packet_dev_mc(dev, ml, -1);
					dev_put(dev);
				}
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		if ((dev = dev_get_by_index(ml->ifindex)) != NULL) {
			packet_dev_mc(dev, ml, -1);
			dev_put(dev);
		}
		kfree(ml);
	}
	rtnl_unlock();
}

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

#ifdef CONFIG_PACKET_MMAP
	case PACKET_RX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
#endif
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
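
/*
 * Example (user space): a minimal sketch of enabling promiscuous mode
 * through the PACKET_ADD_MEMBERSHIP path above.  Unlike poking
 * IFF_PROMISC directly, the reference is dropped automatically when
 * the socket is closed (see packet_flush_mclist()).
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	int enable_promisc(int fd, int ifindex)
 *	{
 *		struct packet_mreq mreq;
 *
 *		memset(&mreq, 0, sizeof(mreq));
 *		mreq.mr_ifindex = ifindex;
 *		mreq.mr_type	= PACKET_MR_PROMISC;
 *
 *		return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *				  &mreq, sizeof(mreq));
 *	}
 */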

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;

		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;

		data = &val;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}

static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;

	read_lock(&packet_sklist_lock);
	sk_for_each(sk, node, &packet_sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			spin_lock(&po->bind_lock);
			if (dev->ifindex == po->ifindex && po->num &&
			    !po->running) {
				dev_add_pack(&po->prot_hook);
				sock_hold(sk);
				po->running = 1;
			}
			spin_unlock(&po->bind_lock);
			break;
		}
	}
	read_unlock(&packet_sklist_lock);
	return NOTIFY_DONE;
}

static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

#ifndef CONFIG_PACKET_MMAP
#define packet_mmap sock_no_mmap
#define packet_poll datagram_poll
#else

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->pg_vec) {
		unsigned last = po->head ? po->head - 1 : po->frame_max;
		struct tpacket_hdr *h;

		h = packet_lookup_frame(po, last);

		if (h->tp_status)
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return mask;
}

/* Dirty? Well, I still have not found a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};

static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
{
	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
}

static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i]))
			free_pages((unsigned long) pg_vec[i], order);
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
					 order);
}

static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	char **pg_vec;
	int i;

	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}

static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
{
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	__be16 num;
	int err = 0;

	if (req->tp_block_nr) {
		int i;

		/* Sanity tests and some calculations */

		if (unlikely(po->pg_vec))
			return -EBUSY;

		if (unlikely((int)req->tp_block_size <= 0))
			return -EINVAL;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			return -EINVAL;
		if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
			return -EINVAL;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			return -EINVAL;

		po->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(po->frames_per_block <= 0))
			return -EINVAL;
		if (unlikely((po->frames_per_block * req->tp_block_nr) !=
			     req->tp_frame_nr))
			return -EINVAL;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;

		for (i = 0; i < req->tp_block_nr; i++) {
			char *ptr = pg_vec[i];
			struct tpacket_hdr *header;
			int k;

			for (k = 0; k < po->frames_per_block; k++) {
				header = (struct tpacket_hdr *) ptr;
				header->tp_status = TP_STATUS_KERNEL;
				ptr += req->tp_frame_size;
			}
		}
		/* Done */
	} else {
		if (unlikely(req->tp_frame_nr))
			return -EINVAL;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })

		spin_lock_bh(&sk->sk_receive_queue.lock);
		pg_vec = XC(po->pg_vec, pg_vec);
		po->frame_max = (req->tp_frame_nr - 1);
		po->head = 0;
		po->frame_size = req->tp_frame_size;
		spin_unlock_bh(&sk->sk_receive_queue.lock);

		order = XC(po->pg_vec_order, order);
		req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);

		po->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
		skb_queue_purge(&sk->sk_receive_queue);
#undef XC
		if (atomic_read(&po->mapped))
			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
	}

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}

static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	size = vma->vm_end - vma->vm_start;

	lock_sock(sk);
	if (po->pg_vec == NULL)
		goto out;
	if (size != po->pg_vec_len * po->pg_vec_pages * PAGE_SIZE)
		goto out;

	start = vma->vm_start;
	for (i = 0; i < po->pg_vec_len; i++) {
		struct page *page = virt_to_page(po->pg_vec[i]);
		int pg_num;

		for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
			err = vm_insert_page(vma, start, page);
			if (unlikely(err))
				goto out;
			start += PAGE_SIZE;
		}
	}
	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	release_sock(sk);
	return err;
}
#endif
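
/*
 * Example (user space): a minimal sketch of setting up and reading
 * the PACKET_RX_RING served by packet_set_ring()/packet_mmap().  The
 * geometry below (4 one-page blocks, 256-byte frames) is illustrative;
 * any layout passing the sanity checks above works.  handle_frame()
 * is a hypothetical consumer.
 *
 *	#include <poll.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	extern void handle_frame(const void *data, unsigned int len);
 *
 *	static void rx_ring_loop(int fd)
 *	{
 *		struct tpacket_req req = {
 *			.tp_block_size	= 4096,
 *			.tp_block_nr	= 4,
 *			.tp_frame_size	= 256,
 *			.tp_frame_nr	= 4 * (4096 / 256),
 *		};
 *		char *ring;
 *		unsigned int frame = 0;
 *
 *		if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
 *			       &req, sizeof(req)) < 0)
 *			return;
 *		ring = mmap(NULL, req.tp_block_nr * req.tp_block_size,
 *			    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		if (ring == MAP_FAILED)
 *			return;
 *
 *		for (;;) {
 *			struct tpacket_hdr *h = (struct tpacket_hdr *)
 *				(ring + frame * req.tp_frame_size);
 *			struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *			while (!(h->tp_status & TP_STATUS_USER))
 *				poll(&pfd, 1, -1);
 *			handle_frame((char *)h + h->tp_mac, h->tp_snaplen);
 *			h->tp_status = TP_STATUS_KERNEL;
 *			frame = (frame + 1) % req.tp_frame_nr;
 *		}
 *	}
 */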

#ifdef CONFIG_SOCK_PACKET
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
#endif

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS
static inline struct sock *packet_seq_idx(loff_t off)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &packet_sklist) {
		if (!off--)
			return s;
	}
	return NULL;
}

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&packet_sklist_lock);
	return *pos ? packet_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return  (v == SEQ_START_TOKEN)
		? sk_head(&packet_sklist)
		: sk_next((struct sock *)v);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&packet_sklist_lock);
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = v;
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}

static struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &packet_seq_ops);
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#endif

static void __exit packet_exit(void)
{
	proc_net_remove("packet");
	unregister_netdevice_notifier(&packet_netdev_notifier);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
	proc_net_fops_create("packet", 0, &packet_seq_fops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);