/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#include <net/inet_common.h>
#include <linux/bpf.h>
#include <net/compat.h>
/*
   Assumptions:
   - if a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header will
     not fit into the reserved space (tunnels); others are silly (PPP).
   - a packet socket receives packets with the ll header already pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header

Outgoing, dev->hard_header != NULL
   mac_header -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.

Outgoing, dev->hard_header == NULL
   mac_header -> data. The ll header is still not built!

   If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header

dev->hard_header == NULL (ll header is added by the device, we cannot control it)

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
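
/*
 * Illustrative sketch (not part of this file): a minimal user-space receiver
 * that binds a packet socket to one interface. With SOCK_RAW the link-layer
 * header is pushed back in front of the data as described above, while
 * SOCK_DGRAM would deliver the payload only. The interface name "eth0" and
 * the lack of error handling are assumptions made for the example.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *	char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	// with SOCK_RAW, the first 14 bytes of buf are the Ethernet header
 */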
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);
#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
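
/*
 * Worked example (illustration only, not from this file): with the 8-byte
 * V3_ALIGNMENT and an assumed 48-byte block descriptor, BLK_HDR_LEN is 48,
 * so a caller asking for 13 bytes of per-block private area reserves
 * BLK_PLUS_PRIV(13) = 48 + ALIGN(13, 8) = 48 + 16 = 64 bytes at the start
 * of each block. The 48-byte descriptor size is only an assumption used to
 * make the arithmetic concrete.
 */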
#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)
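
/*
 * Example (assumed value, for illustration only): with knum_blocks == 4,
 * GET_NEXT_PRB_BLK_NUM() walks the ring 0 -> 1 -> 2 -> 3 -> 0, i.e. the
 * active block number wraps back to zero after the last configured block.
 */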
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	skb = validate_xmit_skb_list(skb, dev);
	if (skb != orig_skb)
		goto drop;

	packet_pick_tx_queue(dev, skb);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}
/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->fanout)
		__fanout_link(sk, po);
	else
		dev_add_pack(&po->prot_hook);
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		break;
	}
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		return 0;
	}
}
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		break;
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));

	return ts_status;
}
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				   int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (ecmd.base.speed < SPEED_1000 ||
		    ecmd.base.speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = ecmd.base.speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo + 1;
	return tmo;
}

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}
static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}
/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}
/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
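
/*
 * Worked example of the derivation above (assumed numbers, matching the
 * comment rather than any particular NIC): a 1 MB block holds 8 Mbit of
 * payload, so prb_calc_retire_blk_tmo() computes
 * mbits = (1048576 * 8) / (1024 * 1024) = 8 and, at 1 Gb/s,
 * div = 1000 / 1000 = 1, giving a retire timeout of roughly 8 ms plus
 * 1 ms of slack. A 10 ms user-supplied tmo therefore only fires once the
 * link goes idle.
 */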
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. queue was frozen, user-space caught up,
				 * now the link went idle && the timer fired.
				 * We don't have a block to close. So we open this
				 * block and restart the timer.
				 * Opening a block thaws the queue and restarts the
				 * timer; thawing/timer-refresh is a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));
#endif
}
/*
 * Side effect of closing a block:
 *
 * 1) Flush the block.
 * 2) Increment active_blk_num
 *
 * Note: We DONT refresh the timer on purpose.
 *	 Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec ts;

		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}
/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);
}
/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
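
/*
 * Illustrative user-space counterpart (not part of this file): after
 * mmap()ing a TPACKET_V3 ring, the reader typically polls, walks the
 * packets of one block via tp_next_offset, then hands the whole block back
 * to the kernel so a frozen queue can be thawed. Field names follow
 * <linux/if_packet.h>; the ring setup, poll() call and the hypothetical
 * handle() helper are omitted/assumed.
 *
 *	struct tpacket_block_desc *bd = ring + blk_num * req.tp_block_size;
 *	if (bd->hdr.bh1.block_status & TP_STATUS_USER) {
 *		struct tpacket3_hdr *pkt =
 *			(void *)bd + bd->hdr.bh1.offset_to_first_pkt;
 *		for (unsigned int i = 0; i < bd->hdr.bh1.num_pkts; i++) {
 *			handle(pkt, pkt->tp_snaplen);
 *			pkt = (void *)pkt + pkt->tp_next_offset;
 *		}
 *		bd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 *		blk_num = (blk_num + 1) % req.tp_block_nr;
 *	}
 */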
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3. Otherwise,
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}
static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}
static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}
/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available; user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}
static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;

	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
			      struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;

	return pbd;
}
static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	int prev;

	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num - 1;
	else
		prev = rb->prb_bdqc.knum_blocks - 1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);

	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}
static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;

	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;
}
static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}
#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2
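
/*
 * Numeric example (assumed ring size, for illustration only): with a
 * 256-frame rx ring, ROOM_NORMAL means the frame 256 >> ROOM_POW_OFF = 64
 * slots ahead of the current head is still kernel-owned; ROOM_LOW means
 * only the frame at the current head itself is still free; ROOM_NONE means
 * not even that slot is available.
 */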
static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
					  - (skb ? skb->truesize : 0);
		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;
	bool has_room;

	spin_lock_bh(&po->sk.sk_receive_queue.lock);
	ret = __packet_rcv_has_room(po, skb);
	has_room = ret == ROOM_NORMAL;
	if (po->pressure == has_room)
		po->pressure = !has_room;
	spin_unlock_bh(&po->sk.sk_receive_queue.lock);

	return ret;
}
static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}
static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}
static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
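
/*
 * Illustrative user-space counterpart (not part of this file): each member
 * socket joins a fanout group with setsockopt(PACKET_FANOUT); the low 16
 * bits of the argument carry the group id and the high bits the mode and
 * flags. The group id 42 below is an arbitrary example value.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// bind fd to an interface first, then:
 *	int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *	// repeat for every worker socket that should share the load
 */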
DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;
static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++)
		if (f->arr[i] == sk)
			break;
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}
static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}
static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fprog))
		return -EINVAL;
	if (copy_from_user(&fprog, data, len))
		return -EFAULT;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_user(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}
static int fanout_set_data(struct packet_sock *po, char __user *data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
		break;
	}
}
static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk))
			return false;
	}
	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}
/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net()).
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}
static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled, push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockc.tsflags = sk->sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}
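
/*
 * Illustrative user-space counterpart (not part of this file): the filter
 * consulted above is the one a process attaches with SO_ATTACH_FILTER.
 * The one-instruction classic BPF program below accepts every packet but
 * truncates it to 96 bytes; the snap length is an arbitrary example value.
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 96 },	// BPF_RET | BPF_K: accept, keep 96 bytes
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */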
static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len)
{
	struct virtio_net_hdr vnet_hdr;

	if (*len < sizeof(vnet_hdr))
		return -EINVAL;
	*len -= sizeof(vnet_hdr);

	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
		return -EINVAL;

	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
}
/*
 * This function makes lazy skb cloning in hope that most of the packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on exit,
 * we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	bool is_drop_n_account = false;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_hatype = dev->type;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
	 * Use their space for storing the original skb length.
	 */
	PACKET_SKB_CB(skb)->sa.origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_packets++;
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk);
	return 0;

drop_n_acct:
	is_drop_n_account = true;
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;
}
*skb
, struct net_device
*dev
,
2168 struct packet_type
*pt
, struct net_device
*orig_dev
)
2171 struct packet_sock
*po
;
2172 struct sockaddr_ll
*sll
;
2173 union tpacket_uhdr h
;
2174 u8
*skb_head
= skb
->data
;
2175 int skb_len
= skb
->len
;
2176 unsigned int snaplen
, res
;
2177 unsigned long status
= TP_STATUS_USER
;
2178 unsigned short macoff
, netoff
, hdrlen
;
2179 struct sk_buff
*copy_skb
= NULL
;
2182 bool is_drop_n_account
= false;
2183 bool do_vnet
= false;
2185 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2186 * We may add members to them until current aligned size without forcing
2187 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2189 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h
.h2
)) != 32);
2190 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h
.h3
)) != 48);
2192 if (skb
->pkt_type
== PACKET_LOOPBACK
)
2195 sk
= pt
->af_packet_priv
;
2198 if (!net_eq(dev_net(dev
), sock_net(sk
)))
2201 if (dev
->header_ops
) {
2202 if (sk
->sk_type
!= SOCK_DGRAM
)
2203 skb_push(skb
, skb
->data
- skb_mac_header(skb
));
2204 else if (skb
->pkt_type
== PACKET_OUTGOING
) {
2205 /* Special case: outgoing packets have ll header at head */
2206 skb_pull(skb
, skb_network_offset(skb
));
2212 res
= run_filter(skb
, sk
, snaplen
);
2214 goto drop_n_restore
;
2216 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2217 status
|= TP_STATUS_CSUMNOTREADY
;
2218 else if (skb
->pkt_type
!= PACKET_OUTGOING
&&
2219 (skb
->ip_summed
== CHECKSUM_COMPLETE
||
2220 skb_csum_unnecessary(skb
)))
2221 status
|= TP_STATUS_CSUM_VALID
;
2226 if (sk
->sk_type
== SOCK_DGRAM
) {
2227 macoff
= netoff
= TPACKET_ALIGN(po
->tp_hdrlen
) + 16 +
2230 unsigned int maclen
= skb_network_offset(skb
);
2231 netoff
= TPACKET_ALIGN(po
->tp_hdrlen
+
2232 (maclen
< 16 ? 16 : maclen
)) +
2234 if (po
->has_vnet_hdr
) {
2235 netoff
+= sizeof(struct virtio_net_hdr
);
2238 macoff
= netoff
- maclen
;
2240 if (po
->tp_version
<= TPACKET_V2
) {
2241 if (macoff
+ snaplen
> po
->rx_ring
.frame_size
) {
2242 if (po
->copy_thresh
&&
2243 atomic_read(&sk
->sk_rmem_alloc
) < sk
->sk_rcvbuf
) {
2244 if (skb_shared(skb
)) {
2245 copy_skb
= skb_clone(skb
, GFP_ATOMIC
);
2247 copy_skb
= skb_get(skb
);
2248 skb_head
= skb
->data
;
2251 skb_set_owner_r(copy_skb
, sk
);
2253 snaplen
= po
->rx_ring
.frame_size
- macoff
;
2254 if ((int)snaplen
< 0) {
2259 } else if (unlikely(macoff
+ snaplen
>
2260 GET_PBDQC_FROM_RB(&po
->rx_ring
)->max_frame_len
)) {
2263 nval
= GET_PBDQC_FROM_RB(&po
->rx_ring
)->max_frame_len
- macoff
;
2264 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2265 snaplen
, nval
, macoff
);
2267 if (unlikely((int)snaplen
< 0)) {
2269 macoff
= GET_PBDQC_FROM_RB(&po
->rx_ring
)->max_frame_len
;
2273 spin_lock(&sk
->sk_receive_queue
.lock
);
2274 h
.raw
= packet_current_rx_frame(po
, skb
,
2275 TP_STATUS_KERNEL
, (macoff
+snaplen
));
2277 goto drop_n_account
;
2278 if (po
->tp_version
<= TPACKET_V2
) {
2279 packet_increment_rx_head(po
, &po
->rx_ring
);
2281 * LOSING will be reported till you read the stats,
2282 * because it's COR - Clear On Read.
2283 * Anyways, moving it for V1/V2 only as V3 doesn't need this
2286 if (po
->stats
.stats1
.tp_drops
)
2287 status
|= TP_STATUS_LOSING
;
2289 po
->stats
.stats1
.tp_packets
++;
2291 status
|= TP_STATUS_COPY
;
2292 __skb_queue_tail(&sk
->sk_receive_queue
, copy_skb
);
2294 spin_unlock(&sk
->sk_receive_queue
.lock
);
2297 if (virtio_net_hdr_from_skb(skb
, h
.raw
+ macoff
-
2298 sizeof(struct virtio_net_hdr
),
2300 spin_lock(&sk
->sk_receive_queue
.lock
);
2301 goto drop_n_account
;
2305 skb_copy_bits(skb
, 0, h
.raw
+ macoff
, snaplen
);
2307 if (!(ts_status
= tpacket_get_timestamp(skb
, &ts
, po
->tp_tstamp
)))
2308 getnstimeofday(&ts
);
2310 status
|= ts_status
;
2312 switch (po
->tp_version
) {
2314 h
.h1
->tp_len
= skb
->len
;
2315 h
.h1
->tp_snaplen
= snaplen
;
2316 h
.h1
->tp_mac
= macoff
;
2317 h
.h1
->tp_net
= netoff
;
2318 h
.h1
->tp_sec
= ts
.tv_sec
;
2319 h
.h1
->tp_usec
= ts
.tv_nsec
/ NSEC_PER_USEC
;
2320 hdrlen
= sizeof(*h
.h1
);
2323 h
.h2
->tp_len
= skb
->len
;
2324 h
.h2
->tp_snaplen
= snaplen
;
2325 h
.h2
->tp_mac
= macoff
;
2326 h
.h2
->tp_net
= netoff
;
2327 h
.h2
->tp_sec
= ts
.tv_sec
;
2328 h
.h2
->tp_nsec
= ts
.tv_nsec
;
2329 if (skb_vlan_tag_present(skb
)) {
2330 h
.h2
->tp_vlan_tci
= skb_vlan_tag_get(skb
);
2331 h
.h2
->tp_vlan_tpid
= ntohs(skb
->vlan_proto
);
2332 status
|= TP_STATUS_VLAN_VALID
| TP_STATUS_VLAN_TPID_VALID
;
2334 h
.h2
->tp_vlan_tci
= 0;
2335 h
.h2
->tp_vlan_tpid
= 0;
2337 memset(h
.h2
->tp_padding
, 0, sizeof(h
.h2
->tp_padding
));
2338 hdrlen
= sizeof(*h
.h2
);
2341 /* tp_nxt_offset,vlan are already populated above.
2342 * So DONT clear those fields here
2344 h
.h3
->tp_status
|= status
;
2345 h
.h3
->tp_len
= skb
->len
;
2346 h
.h3
->tp_snaplen
= snaplen
;
2347 h
.h3
->tp_mac
= macoff
;
2348 h
.h3
->tp_net
= netoff
;
2349 h
.h3
->tp_sec
= ts
.tv_sec
;
2350 h
.h3
->tp_nsec
= ts
.tv_nsec
;
2351 memset(h
.h3
->tp_padding
, 0, sizeof(h
.h3
->tp_padding
));
2352 hdrlen
= sizeof(*h
.h3
);
2358 sll
= h
.raw
+ TPACKET_ALIGN(hdrlen
);
2359 sll
->sll_halen
= dev_parse_header(skb
, sll
->sll_addr
);
2360 sll
->sll_family
= AF_PACKET
;
2361 sll
->sll_hatype
= dev
->type
;
2362 sll
->sll_protocol
= skb
->protocol
;
2363 sll
->sll_pkttype
= skb
->pkt_type
;
2364 if (unlikely(po
->origdev
))
2365 sll
->sll_ifindex
= orig_dev
->ifindex
;
2367 sll
->sll_ifindex
= dev
->ifindex
;
2371 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2372 if (po
->tp_version
<= TPACKET_V2
) {
2375 end
= (u8
*) PAGE_ALIGN((unsigned long) h
.raw
+
2378 for (start
= h
.raw
; start
< end
; start
+= PAGE_SIZE
)
2379 flush_dcache_page(pgv_to_page(start
));
2384 if (po
->tp_version
<= TPACKET_V2
) {
2385 __packet_set_status(po
, h
.raw
, status
);
2386 sk
->sk_data_ready(sk
);
2388 prb_clear_blk_fill_status(&po
->rx_ring
);
2392 if (skb_head
!= skb
->data
&& skb_shared(skb
)) {
2393 skb
->data
= skb_head
;
2397 if (!is_drop_n_account
)
2404 is_drop_n_account
= true;
2405 po
->stats
.stats1
.tp_drops
++;
2406 spin_unlock(&sk
->sk_receive_queue
.lock
);
2408 sk
->sk_data_ready(sk
);
2409 kfree_skb(copy_skb
);
2410 goto drop_n_restore
;
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		void *ph;
		__u32 ts;

		ph = skb_shinfo(skb)->destructor_arg;
		packet_dec_pending(&po->tx_ring);

		ts = __packet_set_timestamp(po, ph, skb);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
	}

	sock_wfree(skb);
}

static void tpacket_set_protocol(const struct net_device *dev,
				 struct sk_buff *skb)
{
	if (dev->type == ARPHRD_ETHER) {
		skb_reset_mac_header(skb);
		skb->protocol = eth_hdr(skb)->h_proto;
	}
}
static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
{
	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
	     __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);

	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
		return -EINVAL;

	return 0;
}

static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
				 struct virtio_net_hdr *vnet_hdr)
{
	if (*len < sizeof(*vnet_hdr))
		return -EINVAL;
	*len -= sizeof(*vnet_hdr);

	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
		return -EFAULT;

	return __packet_snd_vnet_parse(vnet_hdr, *len);
}
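
/*
 * Illustrative userspace sketch (not part of this file): with PACKET_VNET_HDR
 * enabled on a SOCK_RAW packet socket, every frame handed to sendmsg() must
 * be preceded by a struct virtio_net_hdr, which is what
 * packet_snd_vnet_parse() above pulls off the iov. A zeroed header (no GSO,
 * no checksum offload) is the simplest valid case. Error handling is omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>
#include <linux/virtio_net.h>
#include <string.h>

static ssize_t send_with_vnet_hdr(int fd, const void *frame, size_t len)
{
	/* assumes PACKET_VNET_HDR was enabled once with setsockopt(fd,
	 * SOL_PACKET, PACKET_VNET_HDR, &one, sizeof(one)) on a SOCK_RAW socket
	 */
	struct virtio_net_hdr vnet = { 0 };	/* VIRTIO_NET_HDR_GSO_NONE */
	struct iovec iov[2] = {
		{ .iov_base = &vnet,         .iov_len = sizeof(vnet) },
		{ .iov_base = (void *)frame, .iov_len = len },
	};
	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };

	return sendmsg(fd, &msg, 0);
}
#endif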
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, void *data, int tp_len,
		__be16 proto, unsigned char *addr, int hlen, int copylen,
		const struct sockcm_cookie *sockc)
{
	union tpacket_uhdr ph;
	int to_write, offset, len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
	skb_shinfo(skb)->destructor_arg = ph.raw;

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);

	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				      NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (copylen) {
		int hdrlen = min_t(int, copylen, tp_len);

		skb_push(skb, dev->hard_header_len);
		skb_put(skb, copylen - dev->hard_header_len);
		err = skb_store_bits(skb, 0, data, hdrlen);
		if (unlikely(err))
			return err;
		if (!dev_validate_header(dev, skb->data, hdrlen))
			return -EINVAL;
		if (!skb->protocol)
			tpacket_set_protocol(dev, skb);

		data += hdrlen;
		to_write -= hdrlen;
	}

	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	refcount_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	skb_probe_transport_header(skb, 0);

	return tp_len;
}
static int tpacket_parse_header(struct packet_sock *po, void *frame,
				int size_max, void **data)
{
	union tpacket_uhdr ph;
	int tp_len, off;

	ph.raw = frame;

	switch (po->tp_version) {
	case TPACKET_V3:
		if (ph.h3->tp_next_offset != 0) {
			pr_warn_once("variable sized slot not supported");
			return -EINVAL;
		}
		tp_len = ph.h3->tp_len;
		break;
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	if (unlikely(po->tp_tx_has_off)) {
		int off_min, off_max;

		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
		off_max = po->tx_ring.frame_size - tp_len;
		if (po->sk.sk_type == SOCK_DGRAM) {
			switch (po->tp_version) {
			case TPACKET_V3:
				off = ph.h3->tp_net;
				break;
			case TPACKET_V2:
				off = ph.h2->tp_net;
				break;
			default:
				off = ph.h1->tp_net;
				break;
			}
		} else {
			switch (po->tp_version) {
			case TPACKET_V3:
				off = ph.h3->tp_mac;
				break;
			case TPACKET_V2:
				off = ph.h2->tp_mac;
				break;
			default:
				off = ph.h1->tp_mac;
				break;
			}
		}
		if (unlikely((off < off_min) || (off_max < off)))
			return -EINVAL;
	} else {
		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
	}

	*data = frame + off;
	return tp_len;
}
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct virtio_net_hdr *vnet_hdr = NULL;
	struct sockcm_cookie sockc;
	__be16 proto;
	int err, reserve = 0;
	void *ph;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
	int tp_len, size_max;
	unsigned char *addr;
	void *data;
	int len_sum = 0;
	int status = TP_STATUS_AVAILABLE;
	int hlen, tlen, copylen = 0;

	mutex_lock(&po->pg_vec_lock);

	if (likely(saddr == NULL)) {
		dev	= packet_cached_dev_get(po);
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						   sll_addr)))
			goto out;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;
	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	sockc.tsflags = po->sk.sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(&po->sk, msg, &sockc);
		if (unlikely(err))
			goto out_put;
	}

	if (po->sk.sk_socket->type == SOCK_RAW)
		reserve = dev->hard_header_len;
	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
		size_max = dev->mtu + reserve + VLAN_HLEN;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);
		if (unlikely(ph == NULL)) {
			if (need_wait && need_resched())
				schedule();
			continue;
		}

		tp_len = tpacket_parse_header(po, ph, size_max, &data);
		if (tp_len < 0)
			goto tpacket_error;

		status = TP_STATUS_SEND_REQUEST;
		hlen = LL_RESERVED_SPACE(dev);
		tlen = dev->needed_tailroom;
		if (po->has_vnet_hdr) {
			vnet_hdr = data;
			data += sizeof(*vnet_hdr);
			tp_len -= sizeof(*vnet_hdr);
			if (tp_len < 0 ||
			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
				tp_len = -EINVAL;
				goto tpacket_error;
			}
			copylen = __virtio16_to_cpu(vio_le(),
						    vnet_hdr->hdr_len);
		}
		copylen = max_t(int, copylen, dev->hard_header_len);
		skb = sock_alloc_send_skb(&po->sk,
				hlen + tlen + sizeof(struct sockaddr_ll) +
				(copylen - dev->hard_header_len),
				!need_wait, &err);

		if (unlikely(skb == NULL)) {
			/* we assume the socket was initially writeable ... */
			if (likely(len_sum > 0))
				err = len_sum;
			goto out_status;
		}
		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
					  addr, hlen, copylen, &sockc);
		if (likely(tp_len >= 0) &&
		    tp_len > dev->mtu + reserve &&
		    !po->has_vnet_hdr &&
		    !packet_extra_vlan_len_allowed(dev, skb))
			tp_len = -EMSGSIZE;

		if (unlikely(tp_len < 0)) {
tpacket_error:
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						    TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
							      vio_le())) {
			tp_len = -EINVAL;
			goto tpacket_error;
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		packet_inc_pending(&po->tx_ring);

		status = TP_STATUS_SEND_REQUEST;
		err = po->xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
		/* Note: packet_read_pending() might be slow if we have
		 * to call it as it's per_cpu variable, but in fast-path
		 * we already short-circuit the loop with the first
		 * condition, and luckily don't have to go that path
		 * anyway.
		 */
		 (need_wait && packet_read_pending(&po->tx_ring))));

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
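
/*
 * Illustrative userspace sketch (not part of this file): minimal TPACKET_V2
 * TX ring usage against tpacket_snd() above. The socket is assumed to be
 * bound to an interface already, the default data offset (tp_hdrlen minus
 * sizeof(struct sockaddr_ll), see tpacket_parse_header()) is assumed because
 * PACKET_TX_HAS_OFF is not set, and all error handling and ring sizing
 * policy are omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>
#include <string.h>

static int tx_one_frame(int fd, const void *pkt, unsigned int len)
{
	static const struct tpacket_req req = {
		.tp_block_size = 1 << 16,		/* page aligned */
		.tp_block_nr   = 1,
		.tp_frame_size = 1 << 11,
		.tp_frame_nr   = (1 << 16) / (1 << 11),
	};
	int version = TPACKET_V2;
	struct tpacket2_hdr *hdr;
	unsigned char *data;
	void *ring;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	hdr = ring;					/* first frame slot */
	data = (unsigned char *)hdr + TPACKET2_HDRLEN -
	       sizeof(struct sockaddr_ll);		/* default data offset */
	memcpy(data, pkt, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	return send(fd, NULL, 0, 0);			/* kick the kernel */
}
#endif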
static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					size_t reserve, size_t len,
					size_t linear, int noblock,
					int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int err, reserve = 0;
	struct sockcm_cookie sockc;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	struct packet_sock *po = pkt_sk(sk);
	bool has_vnet_hdr = false;
	int hlen, tlen, linear;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (likely(saddr == NULL)) {
		dev	= packet_cached_dev_get(po);
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out_unlock;
	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_unlock;

	sockc.tsflags = sk->sk_tsflags;
	sockc.mark = sk->sk_mark;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;
	if (po->has_vnet_hdr) {
		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
		if (err)
			goto out_unlock;
		has_vnet_hdr = true;
	}

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (!vnet_hdr.gso_type &&
	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
		goto out_unlock;

	err = -ENOBUFS;
	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
	linear = max(linear, min_t(int, len, dev->hard_header_len));
	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM) {
		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (unlikely(offset < 0))
			goto out_free;
	}

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
	if (err)
		goto out_free;

	if (sock->type == SOCK_RAW &&
	    !dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_free;
	}

	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);

	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sockc.mark;

	if (has_vnet_hdr) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
		if (err)
			goto out_free;
		len += sizeof(vnet_hdr);
	}

	skb_probe_transport_header(skb, reserve);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	err = po->xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
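
/*
 * Illustrative userspace sketch (not part of this file): the non-mmap send
 * path above (packet_snd) is what a plain sendto() on a packet socket ends up
 * in. On a SOCK_DGRAM packet socket the kernel builds the link-layer header
 * from the sockaddr_ll (dev_hard_header() above), so userspace only supplies
 * the payload. Destination MAC and ifindex are caller-provided placeholders;
 * error handling is omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static ssize_t send_payload(int ifindex, const unsigned char dst_mac[ETH_ALEN],
			    const void *payload, size_t len)
{
	int fd = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
	struct sockaddr_ll sll;
	ssize_t ret;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_IP);
	sll.sll_ifindex	 = ifindex;
	sll.sll_halen	 = ETH_ALEN;
	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);

	ret = sendto(fd, payload, len, 0, (struct sockaddr *)&sll, sizeof(sll));
	close(fd);
	return ret;
}
#endif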
static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct packet_fanout *f;
	struct net *net;
	union tpacket_req_u req_u;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	mutex_lock(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	preempt_enable();

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	packet_cached_dev_reset(po);

	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	if (po->rx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 0);
	}

	if (po->tx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 1);
	}

	f = fanout_release(sk);

	synchronize_net();

	if (f) {
		kfree(po->rollover);
		fanout_release_data(f);
		kfree(f);
	}

	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	packet_free_pending(po);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
			  __be16 proto)
{
	struct packet_sock *po = pkt_sk(sk);
	struct net_device *dev_curr;
	__be16 proto_curr;
	bool need_rehook;
	struct net_device *dev = NULL;
	int ret = 0;
	bool unlisted = false;

	spin_lock(&po->bind_lock);

	if (name) {
		dev = dev_get_by_name_rcu(sock_net(sk), name);
	} else if (ifindex) {
		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
	}

	proto_curr = po->prot_hook.type;
	dev_curr = po->prot_hook.dev;

	need_rehook = proto_curr != proto || dev_curr != dev;

	if (need_rehook) {
		if (po->running) {
			/* prevents packet_notifier() from calling
			 * register_prot_hook()
			 */
			__unregister_prot_hook(sk, true);

			dev_curr = po->prot_hook.dev;
			if (dev)
				unlisted = !dev_get_by_index_rcu(sock_net(sk),
								 dev->ifindex);
		}

		BUG_ON(po->running);
		po->prot_hook.type = proto;

		if (unlikely(unlisted)) {
			po->prot_hook.dev = NULL;
			packet_cached_dev_reset(po);
		} else {
			po->prot_hook.dev = dev;
			po->ifindex = dev ? dev->ifindex : 0;
			packet_cached_dev_assign(po, dev);
		}
	}

	if (proto == 0 || !need_rehook)
		goto out_unlock;

	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
		register_prot_hook(sk);
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	return ret;
}
/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[sizeof(uaddr->sa_data) + 1];

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
	 * zero-terminated.
	 */
	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
	name[sizeof(uaddr->sa_data)] = 0;

	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;

	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	return packet_do_bind(sk, NULL, sll->sll_ifindex,
			      sll->sll_protocol ? : pkt_sk(sk)->num);
}

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};
/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;
	po->xmit = dev_queue_xmit;

	err = packet_alloc_pending(po);
	if (err)
		goto out2;

	packet_cached_dev_reset(po);

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->rollover = NULL;
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		register_prot_hook(sk);
	}

	mutex_lock(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, &packet_proto, 1);
	preempt_enable();

	return 0;
out2:
	sk_free(sk);
out:
	return err;
}
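
/*
 * Illustrative userspace sketch (not part of this file): packet_create()
 * above is reached through socket(AF_PACKET, ...), which requires
 * CAP_NET_RAW, and packet_bind()/packet_do_bind() through bind() with a
 * sockaddr_ll. The interface name is a placeholder; error handling is
 * omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <string.h>

static int open_bound_packet_socket(const char *ifname)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex	 = if_nametoindex(ifname);

	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
	return fd;
}
#endif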
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	int vnet_hdr_len = 0;
	unsigned int origlen = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = sock_recv_errqueue(sk, msg, len,
					 SOL_PACKET, PACKET_TX_TIMESTAMP);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN, if device have just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->pressure)
		packet_rcv_has_room(pkt_sk(sk), NULL);

	if (pkt_sk(sk)->has_vnet_hdr) {
		err = packet_rcv_vnet(msg, skb, &len);
		if (err)
			goto out_free;
		vnet_hdr_len = sizeof(struct virtio_net_hdr);
	}

	/* You lose any data beyond the buffer you gave. If it worries
	 * a user program they can ask the device for its MTU
	 * anyway.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free;

	if (sock->type != SOCK_PACKET) {
		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;

		/* Original length was stored in sockaddr_ll fields */
		origlen = PACKET_SKB_CB(skb)->sa.origlen;
		sll->sll_family = AF_PACKET;
		sll->sll_protocol = skb->protocol;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		/* If the address length field is there to be filled
		 * in, we fill it in now.
		 */
		if (sock->type == SOCK_PACKET) {
			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
			msg->msg_namelen = sizeof(struct sockaddr_pkt);
		} else {
			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;

			msg->msg_namelen = sll->sll_halen +
				offsetof(struct sockaddr_ll, sll_addr);
		}
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);
	}

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		else if (skb->pkt_type != PACKET_OUTGOING &&
			 (skb->ip_summed == CHECKSUM_COMPLETE ||
			  skb_csum_unnecessary(skb)))
			aux.tp_status |= TP_STATUS_CSUM_VALID;

		aux.tp_len = origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (skb_vlan_tag_present(skb)) {
			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			aux.tp_vlan_tci = 0;
			aux.tp_vlan_tpid = 0;
		}
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
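
/*
 * Illustrative userspace sketch (not part of this file): reading one packet
 * through packet_recvmsg() above and picking up the PACKET_AUXDATA control
 * message it emits (VLAN tag, original length, checksum status). Assumes
 * PACKET_AUXDATA was previously enabled with setsockopt(); error handling is
 * omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>
#include <string.h>

static ssize_t recv_with_auxdata(int fd, void *buf, size_t len,
				 struct tpacket_auxdata *aux_out)
{
	char control[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA)
			memcpy(aux_out, CMSG_DATA(cmsg), sizeof(*aux_out));
	}
	return n;
}
#endif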
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist_delete(struct net_device *dev,
				     struct packet_mclist **mlp)
{
	struct packet_mclist *ml;

	while ((ml = *mlp) != NULL) {
		if (ml->ifindex == dev->ifindex) {
			packet_dev_mc(dev, ml, -1);
			*mlp = ml->next;
			kfree(ml);
		} else
			mlp = &ml->next;
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}
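
/*
 * Illustrative userspace sketch (not part of this file): packet_mc_add()
 * above is driven by the PACKET_ADD_MEMBERSHIP socket option. The
 * PACKET_MR_PROMISC variant is the usual way to put one interface into
 * promiscuous mode for the lifetime of the socket (reference counted via
 * dev_set_promiscuity() above). Error handling is omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static int enable_promisc(int fd, int ifindex)
{
	struct packet_mreq mr;

	memset(&mr, 0, sizeof(mr));
	mr.mr_ifindex = ifindex;
	mr.mr_type    = PACKET_MR_PROMISC;

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mr, sizeof(mr));
}
#endif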
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;

				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			break;
		}
	}
	rtnl_unlock();
	return 0;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;

		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		union tpacket_req_u req_u;
		int len;

		switch (po->tp_version) {
		case TPACKET_V1:
		case TPACKET_V2:
			len = sizeof(req_u.req);
			break;
		case TPACKET_V3:
		default:
			len = sizeof(req_u.req3);
			break;
		}
		if (optlen < len)
			return -EINVAL;
		if (copy_from_user(&req_u.req, optval, len))
			return -EFAULT;
		return packet_set_ring(sk, &req_u, 0,
				       optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		lock_sock(sk);
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
			ret = -EBUSY;
		} else {
			po->tp_version = val;
			ret = 0;
		}
		release_sock(sk);
		return ret;
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		lock_sock(sk);
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
			ret = -EBUSY;
		} else {
			po->tp_reserve = val;
			ret = 0;
		}
		release_sock(sk);
		return ret;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	case PACKET_FANOUT:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		return fanout_add(sk, val & 0xffff, val >> 16);
	}
	case PACKET_FANOUT_DATA:
	{
		if (!po->fanout)
			return -EINVAL;

		return fanout_set_data(po, optval, optlen);
	}
	case PACKET_TX_HAS_OFF:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_tx_has_off = !!val;
		return 0;
	}
	case PACKET_QDISC_BYPASS:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
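
/*
 * Illustrative userspace sketch (not part of this file): the PACKET_FANOUT
 * case above packs a 16-bit group id and a 16-bit mode into one int
 * (fanout_add(sk, val & 0xffff, val >> 16)). Joining the same group id from
 * several sockets spreads received traffic across them. Error handling is
 * omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int join_fanout_group(int fd, unsigned short group_id)
{
	int val = group_id | (PACKET_FANOUT_HASH << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
}
#endif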
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val, lv = sizeof(val);
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	union tpacket_stats_u st;
	struct tpacket_rollover_stats rstats;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		memcpy(&st, &po->stats, sizeof(st));
		memset(&po->stats, 0, sizeof(po->stats));
		spin_unlock_bh(&sk->sk_receive_queue.lock);

		if (po->tp_version == TPACKET_V3) {
			lv = sizeof(struct tpacket_stats_v3);
			st.stats3.tp_packets += st.stats3.tp_drops;
			data = &st.stats3;
		} else {
			lv = sizeof(struct tpacket_stats);
			st.stats1.tp_packets += st.stats1.tp_drops;
			data = &st.stats1;
		}

		break;
	case PACKET_AUXDATA:
		val = po->auxdata;
		break;
	case PACKET_ORIGDEV:
		val = po->origdev;
		break;
	case PACKET_VNET_HDR:
		val = po->has_vnet_hdr;
		break;
	case PACKET_VERSION:
		val = po->tp_version;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (len < sizeof(int))
			return -EINVAL;
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		case TPACKET_V3:
			val = sizeof(struct tpacket3_hdr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case PACKET_RESERVE:
		val = po->tp_reserve;
		break;
	case PACKET_TIMESTAMP:
		val = po->tp_tstamp;
		break;
	case PACKET_FANOUT:
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16) |
			((u32)po->fanout->flags << 24)) :
		       0);
		break;
	case PACKET_ROLLOVER_STATS:
		if (!po->rollover)
			return -EINVAL;
		rstats.tp_all = atomic_long_read(&po->rollover->num);
		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
		data = &rstats;
		lv = sizeof(rstats);
		break;
	case PACKET_TX_HAS_OFF:
		val = po->tp_tx_has_off;
		break;
	case PACKET_QDISC_BYPASS:
		val = packet_use_direct_xmit(po);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
#ifdef CONFIG_COMPAT
static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
				    char __user *optval, unsigned int optlen)
{
	struct packet_sock *po = pkt_sk(sock->sk);

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (optname == PACKET_FANOUT_DATA &&
	    po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
		optval = (char __user *)get_compat_bpf_fprog(optval);
		if (!optval)
			return -EFAULT;
		optlen = sizeof(struct sock_fprog);
	}

	return packet_setsockopt(sock, level, optname, optval, optlen);
}
#endif
static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{
	struct sock *sk;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist_delete(dev, &po->mclist);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					packet_cached_dev_reset(po);
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
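
/*
 * Illustrative userspace sketch (not part of this file): per the SIOCINQ
 * branch above, ioctl(SIOCINQ) on a packet socket reports the length of the
 * next queued packet (0 if the receive queue is empty), which can be used to
 * size a receive buffer before recvfrom(). Error handling is omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/ioctl.h>
#include <linux/sockios.h>

static int next_packet_len(int fd)
{
	int len = 0;

	ioctl(fd, SIOCINQ, &len);
	return len;
}
#endif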
static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
					      TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		po->pressure = 0;
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
/* Dirty? Well, I still did not learn better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer)
		return buffer;

	/* vmalloc failed, lets dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
			   int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err = -EINVAL;
	/* Added to avoid minimal code churn */
	struct tpacket_req *req = &req_u->req;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		if (po->tp_version >= TPACKET_V3 &&
		    req->tp_block_size <=
			  BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Block transmit is not supported yet */
			if (!tx_ring) {
				init_prb_bdqc(po, rb, pg_vec, req_u);
			} else {
				struct tpacket_req3 *req3 = &req_u->req3;

				if (req3->tp_retire_blk_tov ||
				    req3->tp_sizeof_priv ||
				    req3->tp_feature_req_word) {
					err = -EINVAL;
					goto out;
				}
			}
			break;
		default:
			break;
		}
	} else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
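
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * TPACKET_V2 receive loop built on packet_set_ring(), packet_mmap() and
 * packet_poll() above. The ring geometry is arbitrary, frames are indexed
 * linearly because tp_frame_size divides tp_block_size, process_frame() is a
 * placeholder for the caller's handler, and error handling is omitted.
 */
#if 0 /* userspace example, build outside the kernel */
#include <sys/socket.h>
#include <sys/mman.h>
#include <poll.h>
#include <linux/if_packet.h>

void process_frame(const void *data, unsigned int len);	/* placeholder */

static void rx_ring_loop(int fd)
{
	struct tpacket_req req = {
		.tp_block_size = 1 << 16,
		.tp_block_nr   = 8,
		.tp_frame_size = 1 << 11,
		.tp_frame_nr   = 8 * ((1 << 16) / (1 << 11)),
	};
	int version = TPACKET_V2;
	unsigned int i = 0;
	char *ring;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (;;) {
		struct tpacket2_hdr *hdr =
			(void *)(ring + (size_t)i * req.tp_frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			struct pollfd pfd = { .fd = fd, .events = POLLIN };

			poll(&pfd, 1, -1);	/* wait for the next frame */
			continue;
		}

		/* frame data starts tp_mac bytes into the slot */
		process_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);

		hdr->tp_status = TP_STATUS_KERNEL;	/* hand slot back */
		i = (i + 1) % req.tp_frame_nr;
	}
}
#endif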
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_packet_setsockopt,
#endif
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};
#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};
static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);