/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

#include "internal.h"

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit in the reserved space (tunnels); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
                 header. PPP does this, which is wrong, because it introduces
                 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Resume
   If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

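/* Illustrative only: the aliasing above works because struct sockaddr_ll
 * begins with sll_family (unsigned short) followed by sll_protocol (__be16),
 * so its first four bytes line up exactly with one unsigned int.  A
 * hypothetical compile-time check of that assumption (not in the original
 * source) would be:
 *
 *	BUILD_BUG_ON(offsetof(struct sockaddr_ll, sll_ifindex) !=
 *		     sizeof(unsigned int));
 */
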
#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	features = netif_skb_features(skb);
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto drop;

	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

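/* Illustrative only: packet_direct_xmit() hands the skb straight to the
 * driver, bypassing the qdisc layer.  A minimal userspace sketch of how a
 * socket opts into it (assuming a kernel with PACKET_QDISC_BYPASS support):
 *
 *	int one = 1;
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 */
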
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}

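/* Worked example (illustrative): the fallback above spreads senders by CPU.
 * On a device with real_num_tx_queues = 4, a task running on CPU 6 maps to
 * TX queue 6 % 4 = 2.
 */
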
/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket is not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

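/* Illustrative only: tp_status is the kernel/user handshake word on the
 * mmap()ed ring.  A minimal TPACKET_V2 reader sketch (PACKET_RX_RING setup
 * and error handling elided; ring, frame_nr, frame_size and pfd are
 * placeholder names):
 *
 *	struct tpacket2_hdr *hdr = ring + frame_nr * frame_size;
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	// ... consume the frame ...
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 */
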
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
					  int tx_ring,
					  struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
			GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
			       struct tpacket_kbdq_core *pkc,
			       void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
			GET_PBDQC_FROM_RB(&po->rx_ring);
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				   int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}

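/* Worked example (illustrative): with a 1 MiB block on a 1 Gb/s link,
 * prb_calc_retire_blk_tmo() computes mbits = (1 MiB * 8) / 2^20 = 8,
 * div = 1000 / 1000 = 1 and msec = 1, so tmo = 8 * 1 = 8, and the function
 * returns tmo + 1 = 9 ms -- roughly the time needed to fill one block,
 * plus slack.
 */
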
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			    union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			  struct packet_ring_buffer *rb,
			  struct pgv *pg_vec,
			  union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: we DON'T refresh the timer on purpose,
 * because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1,
			    struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
			   struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			     struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
				     struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
			       struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			    struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			     struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			       struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			       struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

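/* Illustrative only: the userspace consumer side of the block fill above.
 * A minimal TPACKET_V3 reader sketch that walks one retired block
 * (PACKET_RX_RING/mmap setup elided; pbd is a placeholder for a block
 * descriptor whose status showed TP_STATUS_USER):
 *
 *	struct tpacket_block_desc *pbd = ...;
 *	struct tpacket3_hdr *ppd =
 *		(void *)pbd + pbd->hdr.bh1.offset_to_first_pkt;
 *	for (unsigned int i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
 *		// ... consume ppd->tp_snaplen bytes at (void *)ppd + ppd->tp_mac ...
 *		ppd = (void *)ppd + ppd->tp_next_offset;
 *	}
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// release the block
 */
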
static void *packet_current_rx_frame(struct packet_sock *po,
				     struct sk_buff *skb,
				     int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
			      struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
				      struct packet_ring_buffer *rb,
				      int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
				     struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	bool has_room;

	if (po->prot_hook.func != tpacket_rcv)
		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
			<= sk->sk_rcvbuf;

	spin_lock(&sk->sk_receive_queue.lock);
	if (po->tp_version == TPACKET_V3)
		has_room = prb_lookup_block(po, &po->rx_ring,
					    po->rx_ring.prb_bdqc.kactive_blk_num,
					    TP_STATUS_KERNEL);
	else
		has_room = packet_lookup_frame(po, &po->rx_ring,
					       po->rx_ring.head,
					       TP_STATUS_KERNEL);
	spin_unlock(&sk->sk_receive_queue.lock);

	return has_room;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(skb_get_hash(skb), num);
}

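/* Illustrative only: reciprocal_scale(h, num) maps a 32-bit hash h into
 * [0, num) without a division, as (u32)(((u64)h * num) >> 32).  Worked
 * example: with num = 4 member sockets and h = 0xC0000000, the product is
 * 0x300000000, so the packet lands in bucket 3.
 */
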
static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return cur;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, unsigned int skip,
					  unsigned int num)
{
	unsigned int i, j;

	i = j = min_t(int, f->next[idx], num - 1);
	do {
		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
			if (i != j)
				f->next[idx] = i;
			return i;
		}
		if (++i == num)
			i = 0;
	} while (i != j);

	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
		break;
	}

	po = pkt_sk(f->arr[idx]);
	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
	    unlikely(!packet_rcv_has_room(po, skb))) {
		idx = fanout_demux_rollover(f, skb, idx, idx, num);
		po = pkt_sk(f->arr[idx]);
	}

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}

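/* Illustrative only: joining a fanout group from userspace.  The setsockopt
 * value packs the group id in the low 16 bits and type_flags in the high 16
 * bits.  Minimal sketch (each member repeats this on its own bound socket;
 * id 42 is arbitrary):
 *
 *	uint16_t id = 42;
 *	int arg = id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *
 * Flags such as PACKET_FANOUT_FLAG_ROLLOVER are OR'ed into the same high
 * half alongside the fanout type.
 */
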
static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	po->fanout = NULL;

	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

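/* Illustrative only: the (obsolete) SOCK_PACKET transmit path above is
 * reached via sendto() with a struct sockaddr_pkt naming the device.
 * Minimal sketch ("eth0" and ETH_P_IP are placeholder choices; fd comes
 * from socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_IP))):
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */
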
static unsigned int run_filter(const struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}

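/* Illustrative only: the filter consulted above is installed from userspace
 * with SO_ATTACH_FILTER.  Minimal sketch using a one-instruction classic BPF
 * program that accepts every packet but truncates the capture to 96 bytes:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */
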
1746/*
62ab0812
ED
1747 * This function makes lazy skb cloning in hope that most of packets
1748 * are discarded by BPF.
1749 *
1750 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1751 * and skb->cb are mangled. It works because (and until) packets
1752 * falling here are owned by current CPU. Output packets are cloned
1753 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1754 * sequencially, so that if we return skb to original state on exit,
1755 * we will not harm anyone.
1da177e4
LT
1756 */
1757
40d4e3df
ED
1758static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1759 struct packet_type *pt, struct net_device *orig_dev)
1da177e4
LT
1760{
1761 struct sock *sk;
1762 struct sockaddr_ll *sll;
1763 struct packet_sock *po;
40d4e3df 1764 u8 *skb_head = skb->data;
1da177e4 1765 int skb_len = skb->len;
dbcb5855 1766 unsigned int snaplen, res;
1da177e4
LT
1767
1768 if (skb->pkt_type == PACKET_LOOPBACK)
1769 goto drop;
1770
1771 sk = pt->af_packet_priv;
1772 po = pkt_sk(sk);
1773
09ad9bc7 1774 if (!net_eq(dev_net(dev), sock_net(sk)))
d12d01d6
DL
1775 goto drop;
1776
1da177e4
LT
1777 skb->dev = dev;
1778
3b04ddde 1779 if (dev->header_ops) {
1da177e4 1780 /* The device has an explicit notion of ll header,
62ab0812
ED
1781 * exported to higher levels.
1782 *
1783 * Otherwise, the device hides details of its frame
1784 * structure, so that corresponding packet head is
1785 * never delivered to user.
1da177e4
LT
1786 */
1787 if (sk->sk_type != SOCK_DGRAM)
98e399f8 1788 skb_push(skb, skb->data - skb_mac_header(skb));
1da177e4
LT
1789 else if (skb->pkt_type == PACKET_OUTGOING) {
1790 /* Special case: outgoing packets have ll header at head */
bbe735e4 1791 skb_pull(skb, skb_network_offset(skb));
1da177e4
LT
1792 }
1793 }
1794
1795 snaplen = skb->len;
1796
dbcb5855
DM
1797 res = run_filter(skb, sk, snaplen);
1798 if (!res)
fda9ef5d 1799 goto drop_n_restore;
dbcb5855
DM
1800 if (snaplen > res)
1801 snaplen = res;
1da177e4 1802
0fd7bac6 1803 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1da177e4
LT
1804 goto drop_n_acct;
1805
1806 if (skb_shared(skb)) {
1807 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1808 if (nskb == NULL)
1809 goto drop_n_acct;
1810
1811 if (skb_head != skb->data) {
1812 skb->data = skb_head;
1813 skb->len = skb_len;
1814 }
abc4e4fa 1815 consume_skb(skb);
1da177e4
LT
1816 skb = nskb;
1817 }
1818
ffbc6111
HX
1819 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1820 sizeof(skb->cb));
1821
1822 sll = &PACKET_SKB_CB(skb)->sa.ll;
1da177e4 1823 sll->sll_hatype = dev->type;
1da177e4 1824 sll->sll_pkttype = skb->pkt_type;
8032b464 1825 if (unlikely(po->origdev))
80feaacb
PWJ
1826 sll->sll_ifindex = orig_dev->ifindex;
1827 else
1828 sll->sll_ifindex = dev->ifindex;
1da177e4 1829
b95cce35 1830 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1da177e4 1831
2472d761
EB
1832 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
1833 * Use their space for storing the original skb length.
1834 */
1835 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
8dc41944 1836
1da177e4
LT
1837 if (pskb_trim(skb, snaplen))
1838 goto drop_n_acct;
1839
1840 skb_set_owner_r(skb, sk);
1841 skb->dev = NULL;
adf30907 1842 skb_dst_drop(skb);
1da177e4 1843
84531c24
PO
1844 /* drop conntrack reference */
1845 nf_reset(skb);
1846
1da177e4 1847 spin_lock(&sk->sk_receive_queue.lock);
ee80fbf3 1848 po->stats.stats1.tp_packets++;
3b885787 1849 skb->dropcount = atomic_read(&sk->sk_drops);
1da177e4
LT
1850 __skb_queue_tail(&sk->sk_receive_queue, skb);
1851 spin_unlock(&sk->sk_receive_queue.lock);
676d2369 1852 sk->sk_data_ready(sk);
1da177e4
LT
1853 return 0;
1854
1855drop_n_acct:
7091fbd8 1856 spin_lock(&sk->sk_receive_queue.lock);
ee80fbf3 1857 po->stats.stats1.tp_drops++;
7091fbd8
WB
1858 atomic_inc(&sk->sk_drops);
1859 spin_unlock(&sk->sk_receive_queue.lock);
1da177e4
LT
1860
1861drop_n_restore:
1862 if (skb_head != skb->data && skb_shared(skb)) {
1863 skb->data = skb_head;
1864 skb->len = skb_len;
1865 }
1866drop:
ead2ceb0 1867 consume_skb(skb);
1da177e4
LT
1868 return 0;
1869}
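
From userspace, the sockaddr_ll that packet_rcv() fills in above is exactly what recvfrom() hands back as the source address. A minimal sketch (assumes CAP_NET_RAW and an Ethernet device; error handling trimmed):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

int main(void)
{
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        unsigned char buf[2048];
        struct sockaddr_ll sll;
        socklen_t slen = sizeof(sll);
        ssize_t n;

        if (fd < 0)
                return 1;

        n = recvfrom(fd, buf, sizeof(buf), 0,
                     (struct sockaddr *)&sll, &slen);
        if (n < 0)
                return 1;

        printf("got %zd bytes, ifindex=%d hatype=%d pkttype=%d halen=%d\n",
               n, sll.sll_ifindex, sll.sll_hatype,
               sll.sll_pkttype, sll.sll_halen);
        return 0;
}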
1870
40d4e3df
ED
1871static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1872 struct packet_type *pt, struct net_device *orig_dev)
1da177e4
LT
1873{
1874 struct sock *sk;
1875 struct packet_sock *po;
1876 struct sockaddr_ll *sll;
184f489e 1877 union tpacket_uhdr h;
40d4e3df 1878 u8 *skb_head = skb->data;
1da177e4 1879 int skb_len = skb->len;
dbcb5855 1880 unsigned int snaplen, res;
f6fb8f10 1881 unsigned long status = TP_STATUS_USER;
bbd6ef87 1882 unsigned short macoff, netoff, hdrlen;
1da177e4 1883 struct sk_buff *copy_skb = NULL;
bbd6ef87 1884 struct timespec ts;
b9c32fb2 1885 __u32 ts_status;
1da177e4 1886
51846355
AW
1887 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
1888 * We may add members to them up to the current aligned size without forcing
1889 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
1890 */
1891 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
1892 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
1893
1da177e4
LT
1894 if (skb->pkt_type == PACKET_LOOPBACK)
1895 goto drop;
1896
1897 sk = pt->af_packet_priv;
1898 po = pkt_sk(sk);
1899
09ad9bc7 1900 if (!net_eq(dev_net(dev), sock_net(sk)))
d12d01d6
DL
1901 goto drop;
1902
3b04ddde 1903 if (dev->header_ops) {
1da177e4 1904 if (sk->sk_type != SOCK_DGRAM)
98e399f8 1905 skb_push(skb, skb->data - skb_mac_header(skb));
1da177e4
LT
1906 else if (skb->pkt_type == PACKET_OUTGOING) {
1907 /* Special case: outgoing packets have ll header at head */
bbe735e4 1908 skb_pull(skb, skb_network_offset(skb));
1da177e4
LT
1909 }
1910 }
1911
8dc41944
HX
1912 if (skb->ip_summed == CHECKSUM_PARTIAL)
1913 status |= TP_STATUS_CSUMNOTREADY;
1914
1da177e4
LT
1915 snaplen = skb->len;
1916
dbcb5855
DM
1917 res = run_filter(skb, sk, snaplen);
1918 if (!res)
fda9ef5d 1919 goto drop_n_restore;
dbcb5855
DM
1920 if (snaplen > res)
1921 snaplen = res;
1da177e4
LT
1922
1923 if (sk->sk_type == SOCK_DGRAM) {
8913336a
PM
1924 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1925 po->tp_reserve;
1da177e4 1926 } else {
95c96174 1927 unsigned int maclen = skb_network_offset(skb);
bbd6ef87 1928 netoff = TPACKET_ALIGN(po->tp_hdrlen +
8913336a
PM
1929 (maclen < 16 ? 16 : maclen)) +
1930 po->tp_reserve;
1da177e4
LT
1931 macoff = netoff - maclen;
1932 }
f6fb8f10 1933 if (po->tp_version <= TPACKET_V2) {
1934 if (macoff + snaplen > po->rx_ring.frame_size) {
1935 if (po->copy_thresh &&
0fd7bac6 1936 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
f6fb8f10 1937 if (skb_shared(skb)) {
1938 copy_skb = skb_clone(skb, GFP_ATOMIC);
1939 } else {
1940 copy_skb = skb_get(skb);
1941 skb_head = skb->data;
1942 }
1943 if (copy_skb)
1944 skb_set_owner_r(copy_skb, sk);
1da177e4 1945 }
f6fb8f10 1946 snaplen = po->rx_ring.frame_size - macoff;
1947 if ((int)snaplen < 0)
1948 snaplen = 0;
1da177e4 1949 }
dc808110
ED
1950 } else if (unlikely(macoff + snaplen >
1951 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
1952 u32 nval;
1953
1954 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
1955 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
1956 snaplen, nval, macoff);
1957 snaplen = nval;
1958 if (unlikely((int)snaplen < 0)) {
1959 snaplen = 0;
1960 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
1961 }
1da177e4 1962 }
1da177e4 1963 spin_lock(&sk->sk_receive_queue.lock);
f6fb8f10 1964 h.raw = packet_current_rx_frame(po, skb,
1965 TP_STATUS_KERNEL, (macoff+snaplen));
bbd6ef87 1966 if (!h.raw)
1da177e4 1967 goto ring_is_full;
f6fb8f10 1968 if (po->tp_version <= TPACKET_V2) {
1969 packet_increment_rx_head(po, &po->rx_ring);
1970 /*
1971 * LOSING will be reported until you read the stats,
1972 * because the counter is COR - Clear On Read.
1973 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
1974 * at the packet level.
1975 */
ee80fbf3 1976 if (po->stats.stats1.tp_drops)
f6fb8f10 1977 status |= TP_STATUS_LOSING;
1978 }
ee80fbf3 1979 po->stats.stats1.tp_packets++;
1da177e4
LT
1980 if (copy_skb) {
1981 status |= TP_STATUS_COPY;
1982 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1983 }
1da177e4
LT
1984 spin_unlock(&sk->sk_receive_queue.lock);
1985
bbd6ef87 1986 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
b9c32fb2
DB
1987
1988 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
7a51384c 1989 getnstimeofday(&ts);
1da177e4 1990
b9c32fb2
DB
1991 status |= ts_status;
1992
bbd6ef87
PM
1993 switch (po->tp_version) {
1994 case TPACKET_V1:
1995 h.h1->tp_len = skb->len;
1996 h.h1->tp_snaplen = snaplen;
1997 h.h1->tp_mac = macoff;
1998 h.h1->tp_net = netoff;
4b457bdf
DB
1999 h.h1->tp_sec = ts.tv_sec;
2000 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
bbd6ef87
PM
2001 hdrlen = sizeof(*h.h1);
2002 break;
2003 case TPACKET_V2:
2004 h.h2->tp_len = skb->len;
2005 h.h2->tp_snaplen = snaplen;
2006 h.h2->tp_mac = macoff;
2007 h.h2->tp_net = netoff;
bbd6ef87
PM
2008 h.h2->tp_sec = ts.tv_sec;
2009 h.h2->tp_nsec = ts.tv_nsec;
df8a39de
JP
2010 if (skb_vlan_tag_present(skb)) {
2011 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
a0cdfcf3
AW
2012 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2013 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
a3bcc23e
BG
2014 } else {
2015 h.h2->tp_vlan_tci = 0;
a0cdfcf3 2016 h.h2->tp_vlan_tpid = 0;
a3bcc23e 2017 }
e4d26f4b 2018 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
bbd6ef87
PM
2019 hdrlen = sizeof(*h.h2);
2020 break;
f6fb8f10 2021 case TPACKET_V3:
2022 /* tp_nxt_offset and the vlan fields are already populated
2023 * above, so DON'T clear them here.
2024 */
2025 h.h3->tp_status |= status;
2026 h.h3->tp_len = skb->len;
2027 h.h3->tp_snaplen = snaplen;
2028 h.h3->tp_mac = macoff;
2029 h.h3->tp_net = netoff;
f6fb8f10 2030 h.h3->tp_sec = ts.tv_sec;
2031 h.h3->tp_nsec = ts.tv_nsec;
e4d26f4b 2032 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
f6fb8f10 2033 hdrlen = sizeof(*h.h3);
2034 break;
bbd6ef87
PM
2035 default:
2036 BUG();
2037 }
1da177e4 2038
bbd6ef87 2039 sll = h.raw + TPACKET_ALIGN(hdrlen);
b95cce35 2040 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1da177e4
LT
2041 sll->sll_family = AF_PACKET;
2042 sll->sll_hatype = dev->type;
2043 sll->sll_protocol = skb->protocol;
2044 sll->sll_pkttype = skb->pkt_type;
8032b464 2045 if (unlikely(po->origdev))
80feaacb
PWJ
2046 sll->sll_ifindex = orig_dev->ifindex;
2047 else
2048 sll->sll_ifindex = dev->ifindex;
1da177e4 2049
e16aa207 2050 smp_mb();
f0d4eb29 2051
f6dafa95 2052#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
f0d4eb29 2053 if (po->tp_version <= TPACKET_V2) {
0af55bb5
CG
2054 u8 *start, *end;
2055
f0d4eb29
DB
2056 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2057 macoff + snaplen);
2058
2059 for (start = h.raw; start < end; start += PAGE_SIZE)
2060 flush_dcache_page(pgv_to_page(start));
1da177e4 2061 }
f0d4eb29 2062 smp_wmb();
f6dafa95 2063#endif
f0d4eb29 2064
da413eec 2065 if (po->tp_version <= TPACKET_V2) {
f6fb8f10 2066 __packet_set_status(po, h.raw, status);
da413eec
DC
2067 sk->sk_data_ready(sk);
2068 } else {
f6fb8f10 2069 prb_clear_blk_fill_status(&po->rx_ring);
da413eec 2070 }
1da177e4
LT
2071
2072drop_n_restore:
2073 if (skb_head != skb->data && skb_shared(skb)) {
2074 skb->data = skb_head;
2075 skb->len = skb_len;
2076 }
2077drop:
1ce4f28b 2078 kfree_skb(skb);
1da177e4
LT
2079 return 0;
2080
2081ring_is_full:
ee80fbf3 2082 po->stats.stats1.tp_drops++;
1da177e4
LT
2083 spin_unlock(&sk->sk_receive_queue.lock);
2084
676d2369 2085 sk->sk_data_ready(sk);
acb5d75b 2086 kfree_skb(copy_skb);
1da177e4
LT
2087 goto drop_n_restore;
2088}
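
The userspace counterpart of tpacket_rcv() walks the mmap'ed ring and flips frame ownership back via tp_status. A TPACKET_V2 sketch, assuming 'ring', 'frame_nr' and 'frame_size' (hypothetical names here) come from an earlier PACKET_RX_RING plus mmap() setup:

#include <stdio.h>
#include <linux/if_packet.h>

static void drain_ring(char *ring, unsigned int frame_nr,
                       unsigned int frame_size)
{
        static unsigned int idx;

        for (;;) {
                struct tpacket2_hdr *hdr =
                        (struct tpacket2_hdr *)(ring + idx * frame_size);

                if (!(hdr->tp_status & TP_STATUS_USER))
                        break;  /* next frame still owned by the kernel */

                printf("frame %u: len=%u snaplen=%u mac=%u\n",
                       idx, hdr->tp_len, hdr->tp_snaplen, hdr->tp_mac);

                /* hand the slot back, mirroring __packet_set_status() */
                __sync_synchronize();
                hdr->tp_status = TP_STATUS_KERNEL;
                idx = (idx + 1) % frame_nr;
        }
}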
2089
69e3c75f
JB
2090static void tpacket_destruct_skb(struct sk_buff *skb)
2091{
2092 struct packet_sock *po = pkt_sk(skb->sk);
1da177e4 2093
69e3c75f 2094 if (likely(po->tx_ring.pg_vec)) {
f0d4eb29 2095 void *ph;
b9c32fb2
DB
2096 __u32 ts;
2097
69e3c75f 2098 ph = skb_shinfo(skb)->destructor_arg;
b0138408 2099 packet_dec_pending(&po->tx_ring);
b9c32fb2
DB
2100
2101 ts = __packet_set_timestamp(po, ph, skb);
2102 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
69e3c75f
JB
2103 }
2104
2105 sock_wfree(skb);
2106}
2107
9c707762
WB
2108static bool ll_header_truncated(const struct net_device *dev, int len)
2109{
2110 /* the net device doesn't accept a truncated link-layer header */
2111 if (unlikely(len <= dev->hard_header_len)) {
eee2f04b 2112 net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
9c707762
WB
2113 current->comm, len, dev->hard_header_len);
2114 return true;
2115 }
2116
2117 return false;
2118}
2119
40d4e3df
ED
2120static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2121 void *frame, struct net_device *dev, int size_max,
ae641949 2122 __be16 proto, unsigned char *addr, int hlen)
69e3c75f 2123{
184f489e 2124 union tpacket_uhdr ph;
09effa67 2125 int to_write, offset, len, tp_len, nr_frags, len_max;
69e3c75f
JB
2126 struct socket *sock = po->sk.sk_socket;
2127 struct page *page;
2128 void *data;
2129 int err;
2130
2131 ph.raw = frame;
2132
2133 skb->protocol = proto;
2134 skb->dev = dev;
2135 skb->priority = po->sk.sk_priority;
2d37a186 2136 skb->mark = po->sk.sk_mark;
2e31396f 2137 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
69e3c75f
JB
2138 skb_shinfo(skb)->destructor_arg = ph.raw;
2139
2140 switch (po->tp_version) {
2141 case TPACKET_V2:
2142 tp_len = ph.h2->tp_len;
2143 break;
2144 default:
2145 tp_len = ph.h1->tp_len;
2146 break;
2147 }
09effa67
DM
2148 if (unlikely(tp_len > size_max)) {
2149 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2150 return -EMSGSIZE;
2151 }
69e3c75f 2152
ae641949 2153 skb_reserve(skb, hlen);
69e3c75f 2154 skb_reset_network_header(skb);
c1aad275 2155
d346a3fa
DB
2156 if (!packet_use_direct_xmit(po))
2157 skb_probe_transport_header(skb, 0);
2158 if (unlikely(po->tp_tx_has_off)) {
5920cd3a
PC
2159 int off_min, off_max, off;
2160 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2161 off_max = po->tx_ring.frame_size - tp_len;
2162 if (sock->type == SOCK_DGRAM) {
2163 switch (po->tp_version) {
2164 case TPACKET_V2:
2165 off = ph.h2->tp_net;
2166 break;
2167 default:
2168 off = ph.h1->tp_net;
2169 break;
2170 }
2171 } else {
2172 switch (po->tp_version) {
2173 case TPACKET_V2:
2174 off = ph.h2->tp_mac;
2175 break;
2176 default:
2177 off = ph.h1->tp_mac;
2178 break;
2179 }
2180 }
2181 if (unlikely((off < off_min) || (off_max < off)))
2182 return -EINVAL;
2183 data = ph.raw + off;
2184 } else {
2185 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2186 }
69e3c75f
JB
2187 to_write = tp_len;
2188
2189 if (sock->type == SOCK_DGRAM) {
2190 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2191 NULL, tp_len);
2192 if (unlikely(err < 0))
2193 return -EINVAL;
40d4e3df 2194 } else if (dev->hard_header_len) {
9c707762 2195 if (ll_header_truncated(dev, tp_len))
69e3c75f 2196 return -EINVAL;
69e3c75f
JB
2197
2198 skb_push(skb, dev->hard_header_len);
2199 err = skb_store_bits(skb, 0, data,
2200 dev->hard_header_len);
2201 if (unlikely(err))
2202 return err;
2203
2204 data += dev->hard_header_len;
2205 to_write -= dev->hard_header_len;
2206 }
2207
69e3c75f
JB
2208 offset = offset_in_page(data);
2209 len_max = PAGE_SIZE - offset;
2210 len = ((to_write > len_max) ? len_max : to_write);
2211
2212 skb->data_len = to_write;
2213 skb->len += to_write;
2214 skb->truesize += to_write;
2215 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2216
2217 while (likely(to_write)) {
2218 nr_frags = skb_shinfo(skb)->nr_frags;
2219
2220 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
40d4e3df
ED
2221 pr_err("Packet exceed the number of skb frags(%lu)\n",
2222 MAX_SKB_FRAGS);
69e3c75f
JB
2223 return -EFAULT;
2224 }
2225
0af55bb5
CG
2226 page = pgv_to_page(data);
2227 data += len;
69e3c75f
JB
2228 flush_dcache_page(page);
2229 get_page(page);
0af55bb5 2230 skb_fill_page_desc(skb, nr_frags, page, offset, len);
69e3c75f
JB
2231 to_write -= len;
2232 offset = 0;
2233 len_max = PAGE_SIZE;
2234 len = ((to_write > len_max) ? len_max : to_write);
2235 }
2236
2237 return tp_len;
2238}
2239
2240static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2241{
69e3c75f
JB
2242 struct sk_buff *skb;
2243 struct net_device *dev;
2244 __be16 proto;
09effa67 2245 int err, reserve = 0;
40d4e3df 2246 void *ph;
342dfc30 2247 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
87a2fd28 2248 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
69e3c75f
JB
2249 int tp_len, size_max;
2250 unsigned char *addr;
2251 int len_sum = 0;
9e67030a 2252 int status = TP_STATUS_AVAILABLE;
ae641949 2253 int hlen, tlen;
69e3c75f 2254
69e3c75f
JB
2255 mutex_lock(&po->pg_vec_lock);
2256
66e56cd4 2257 if (likely(saddr == NULL)) {
e40526cb 2258 dev = packet_cached_dev_get(po);
69e3c75f
JB
2259 proto = po->num;
2260 addr = NULL;
2261 } else {
2262 err = -EINVAL;
2263 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2264 goto out;
2265 if (msg->msg_namelen < (saddr->sll_halen
2266 + offsetof(struct sockaddr_ll,
2267 sll_addr)))
2268 goto out;
69e3c75f
JB
2269 proto = saddr->sll_protocol;
2270 addr = saddr->sll_addr;
827d9780 2271 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
69e3c75f
JB
2272 }
2273
69e3c75f
JB
2274 err = -ENXIO;
2275 if (unlikely(dev == NULL))
2276 goto out;
69e3c75f
JB
2277 err = -ENETDOWN;
2278 if (unlikely(!(dev->flags & IFF_UP)))
2279 goto out_put;
2280
52f1454f 2281 reserve = dev->hard_header_len + VLAN_HLEN;
69e3c75f 2282 size_max = po->tx_ring.frame_size
b5dd884e 2283 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
69e3c75f 2284
09effa67
DM
2285 if (size_max > dev->mtu + reserve)
2286 size_max = dev->mtu + reserve;
2287
69e3c75f
JB
2288 do {
2289 ph = packet_current_frame(po, &po->tx_ring,
87a2fd28 2290 TP_STATUS_SEND_REQUEST);
69e3c75f 2291 if (unlikely(ph == NULL)) {
87a2fd28
DB
2292 if (need_wait && need_resched())
2293 schedule();
69e3c75f
JB
2294 continue;
2295 }
2296
2297 status = TP_STATUS_SEND_REQUEST;
ae641949
HX
2298 hlen = LL_RESERVED_SPACE(dev);
2299 tlen = dev->needed_tailroom;
69e3c75f 2300 skb = sock_alloc_send_skb(&po->sk,
ae641949 2301 hlen + tlen + sizeof(struct sockaddr_ll),
69e3c75f
JB
2302 0, &err);
2303
2304 if (unlikely(skb == NULL))
2305 goto out_status;
2306
2307 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
52f1454f
DB
2308 addr, hlen);
2309 if (tp_len > dev->mtu + dev->hard_header_len) {
2310 struct ethhdr *ehdr;
2311 /* Earlier code assumed this would be a VLAN pkt,
2312 * double-check this now that we have the actual
2313 * packet in hand.
2314 */
69e3c75f 2315
52f1454f
DB
2316 skb_reset_mac_header(skb);
2317 ehdr = eth_hdr(skb);
2318 if (ehdr->h_proto != htons(ETH_P_8021Q))
2319 tp_len = -EMSGSIZE;
2320 }
69e3c75f
JB
2321 if (unlikely(tp_len < 0)) {
2322 if (po->tp_loss) {
2323 __packet_set_status(po, ph,
2324 TP_STATUS_AVAILABLE);
2325 packet_increment_head(&po->tx_ring);
2326 kfree_skb(skb);
2327 continue;
2328 } else {
2329 status = TP_STATUS_WRONG_FORMAT;
2330 err = tp_len;
2331 goto out_status;
2332 }
2333 }
2334
0fd5d57b
DB
2335 packet_pick_tx_queue(dev, skb);
2336
69e3c75f
JB
2337 skb->destructor = tpacket_destruct_skb;
2338 __packet_set_status(po, ph, TP_STATUS_SENDING);
b0138408 2339 packet_inc_pending(&po->tx_ring);
69e3c75f
JB
2340
2341 status = TP_STATUS_SEND_REQUEST;
d346a3fa 2342 err = po->xmit(skb);
eb70df13
JP
2343 if (unlikely(err > 0)) {
2344 err = net_xmit_errno(err);
2345 if (err && __packet_get_status(po, ph) ==
2346 TP_STATUS_AVAILABLE) {
2347 /* skb was destructed already */
2348 skb = NULL;
2349 goto out_status;
2350 }
2351 /*
2352 * skb was dropped but not destructed yet;
2353 * let's treat it like congestion or err < 0
2354 */
2355 err = 0;
2356 }
69e3c75f
JB
2357 packet_increment_head(&po->tx_ring);
2358 len_sum += tp_len;
b0138408
DB
2359 } while (likely((ph != NULL) ||
2360 /* Note: packet_read_pending() might be slow if we have
2361 * to call it, as it's a per-cpu variable, but in the
2362 * fast path we already short-circuit the loop with the
2363 * first condition, and luckily don't have to go down
2364 * that path anyway.
2365 */
2366 (need_wait && packet_read_pending(&po->tx_ring))));
69e3c75f
JB
2367
2368 err = len_sum;
2369 goto out_put;
2370
69e3c75f
JB
2371out_status:
2372 __packet_set_status(po, ph, status);
2373 kfree_skb(skb);
2374out_put:
e40526cb 2375 dev_put(dev);
69e3c75f
JB
2376out:
2377 mutex_unlock(&po->pg_vec_lock);
2378 return err;
2379}
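
Seen from userspace, tpacket_snd() above drains TX-ring slots flagged TP_STATUS_SEND_REQUEST. A sketch of queueing one pre-built frame, assuming the ring was already created with PACKET_TX_RING, mmap'ed and the socket bound; 'slot' and 'tp_hdrlen' (queried via PACKET_HDRLEN) are the caller's:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int queue_one_frame(int fd, char *slot, const void *pkt,
                           unsigned int len, unsigned int tp_hdrlen)
{
        struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)slot;

        if (hdr->tp_status != TP_STATUS_AVAILABLE)
                return -1;      /* slot still owned by the kernel */

        /* data starts after the header area, matching the offset
         * tpacket_fill_skb() computes when tp_tx_has_off is unset */
        memcpy(slot + tp_hdrlen - sizeof(struct sockaddr_ll), pkt, len);
        hdr->tp_len = len;
        __sync_synchronize();
        hdr->tp_status = TP_STATUS_SEND_REQUEST;

        /* one send() flushes every SEND_REQUEST slot in the ring */
        return send(fd, NULL, 0, 0);
}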
69e3c75f 2380
eea49cc9
OJ
2381static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2382 size_t reserve, size_t len,
2383 size_t linear, int noblock,
2384 int *err)
bfd5f4a3
SS
2385{
2386 struct sk_buff *skb;
2387
2388 /* Under a page? Don't bother with paged skb. */
2389 if (prepad + len < PAGE_SIZE || !linear)
2390 linear = len;
2391
2392 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
28d64271 2393 err, 0);
bfd5f4a3
SS
2394 if (!skb)
2395 return NULL;
2396
2397 skb_reserve(skb, reserve);
2398 skb_put(skb, linear);
2399 skb->data_len = len - linear;
2400 skb->len += len - linear;
2401
2402 return skb;
2403}
2404
d346a3fa 2405static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1da177e4
LT
2406{
2407 struct sock *sk = sock->sk;
342dfc30 2408 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
1da177e4
LT
2409 struct sk_buff *skb;
2410 struct net_device *dev;
0e11c91e 2411 __be16 proto;
1da177e4 2412 unsigned char *addr;
827d9780 2413 int err, reserve = 0;
bfd5f4a3
SS
2414 struct virtio_net_hdr vnet_hdr = { 0 };
2415 int offset = 0;
2416 int vnet_hdr_len;
2417 struct packet_sock *po = pkt_sk(sk);
2418 unsigned short gso_type = 0;
ae641949 2419 int hlen, tlen;
3bdc0eba 2420 int extra_len = 0;
8feb2fb2 2421 ssize_t n;
1da177e4
LT
2422
2423 /*
1ce4f28b 2424 * Get and verify the address.
1da177e4 2425 */
1ce4f28b 2426
66e56cd4 2427 if (likely(saddr == NULL)) {
e40526cb 2428 dev = packet_cached_dev_get(po);
1da177e4
LT
2429 proto = po->num;
2430 addr = NULL;
2431 } else {
2432 err = -EINVAL;
2433 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2434 goto out;
0fb375fb
EB
2435 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2436 goto out;
1da177e4
LT
2437 proto = saddr->sll_protocol;
2438 addr = saddr->sll_addr;
827d9780 2439 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
1da177e4
LT
2440 }
2441
1da177e4 2442 err = -ENXIO;
e40526cb 2443 if (unlikely(dev == NULL))
1da177e4 2444 goto out_unlock;
d5e76b0a 2445 err = -ENETDOWN;
e40526cb 2446 if (unlikely(!(dev->flags & IFF_UP)))
d5e76b0a
DM
2447 goto out_unlock;
2448
e40526cb
DB
2449 if (sock->type == SOCK_RAW)
2450 reserve = dev->hard_header_len;
bfd5f4a3
SS
2451 if (po->has_vnet_hdr) {
2452 vnet_hdr_len = sizeof(vnet_hdr);
2453
2454 err = -EINVAL;
2455 if (len < vnet_hdr_len)
2456 goto out_unlock;
2457
2458 len -= vnet_hdr_len;
2459
8feb2fb2 2460 err = -EFAULT;
c0371da6 2461 n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
8feb2fb2 2462 if (n != vnet_hdr_len)
bfd5f4a3
SS
2463 goto out_unlock;
2464
2465 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
dc9e5153
MT
2466 (__virtio16_to_cpu(false, vnet_hdr.csum_start) +
2467 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 >
2468 __virtio16_to_cpu(false, vnet_hdr.hdr_len)))
2469 vnet_hdr.hdr_len = __cpu_to_virtio16(false,
2470 __virtio16_to_cpu(false, vnet_hdr.csum_start) +
2471 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2);
bfd5f4a3
SS
2472
2473 err = -EINVAL;
dc9e5153 2474 if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len)
bfd5f4a3
SS
2475 goto out_unlock;
2476
2477 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2478 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2479 case VIRTIO_NET_HDR_GSO_TCPV4:
2480 gso_type = SKB_GSO_TCPV4;
2481 break;
2482 case VIRTIO_NET_HDR_GSO_TCPV6:
2483 gso_type = SKB_GSO_TCPV6;
2484 break;
2485 case VIRTIO_NET_HDR_GSO_UDP:
2486 gso_type = SKB_GSO_UDP;
2487 break;
2488 default:
2489 goto out_unlock;
2490 }
2491
2492 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2493 gso_type |= SKB_GSO_TCP_ECN;
2494
2495 if (vnet_hdr.gso_size == 0)
2496 goto out_unlock;
2497
2498 }
2499 }
2500
3bdc0eba
BG
2501 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2502 if (!netif_supports_nofcs(dev)) {
2503 err = -EPROTONOSUPPORT;
2504 goto out_unlock;
2505 }
2506 extra_len = 4; /* We're doing our own CRC */
2507 }
2508
1da177e4 2509 err = -EMSGSIZE;
3bdc0eba 2510 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
1da177e4
LT
2511 goto out_unlock;
2512
bfd5f4a3 2513 err = -ENOBUFS;
ae641949
HX
2514 hlen = LL_RESERVED_SPACE(dev);
2515 tlen = dev->needed_tailroom;
dc9e5153
MT
2516 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2517 __virtio16_to_cpu(false, vnet_hdr.hdr_len),
bfd5f4a3 2518 msg->msg_flags & MSG_DONTWAIT, &err);
40d4e3df 2519 if (skb == NULL)
1da177e4
LT
2520 goto out_unlock;
2521
bfd5f4a3 2522 skb_set_network_header(skb, reserve);
1da177e4 2523
0c4e8581 2524 err = -EINVAL;
9c707762
WB
2525 if (sock->type == SOCK_DGRAM) {
2526 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
46d2cfb1 2527 if (unlikely(offset < 0))
9c707762
WB
2528 goto out_free;
2529 } else {
2530 if (ll_header_truncated(dev, len))
2531 goto out_free;
2532 }
1da177e4
LT
2533
2534 /* Returns -EFAULT on error */
c0371da6 2535 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
1da177e4
LT
2536 if (err)
2537 goto out_free;
bf84a010
DB
2538
2539 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1da177e4 2540
3bdc0eba 2541 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
09effa67
DM
2542 /* Earlier code assumed this would be a VLAN pkt,
2543 * double-check this now that we have the actual
2544 * packet in hand.
2545 */
2546 struct ethhdr *ehdr;
2547 skb_reset_mac_header(skb);
2548 ehdr = eth_hdr(skb);
2549 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2550 err = -EMSGSIZE;
2551 goto out_free;
2552 }
57f89bfa
BG
2553 }
2554
09effa67
DM
2555 skb->protocol = proto;
2556 skb->dev = dev;
1da177e4 2557 skb->priority = sk->sk_priority;
2d37a186 2558 skb->mark = sk->sk_mark;
0fd5d57b
DB
2559
2560 packet_pick_tx_queue(dev, skb);
1da177e4 2561
bfd5f4a3
SS
2562 if (po->has_vnet_hdr) {
2563 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
dc9e5153
MT
2564 u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start);
2565 u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
2566 if (!skb_partial_csum_set(skb, s, o)) {
bfd5f4a3
SS
2567 err = -EINVAL;
2568 goto out_free;
2569 }
2570 }
2571
dc9e5153
MT
2572 skb_shinfo(skb)->gso_size =
2573 __virtio16_to_cpu(false, vnet_hdr.gso_size);
bfd5f4a3
SS
2574 skb_shinfo(skb)->gso_type = gso_type;
2575
2576 /* Header must be checked, and gso_segs computed. */
2577 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2578 skb_shinfo(skb)->gso_segs = 0;
2579
2580 len += vnet_hdr_len;
2581 }
2582
d346a3fa
DB
2583 if (!packet_use_direct_xmit(po))
2584 skb_probe_transport_header(skb, reserve);
3bdc0eba
BG
2585 if (unlikely(extra_len == 4))
2586 skb->no_fcs = 1;
2587
d346a3fa 2588 err = po->xmit(skb);
1da177e4
LT
2589 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2590 goto out_unlock;
2591
e40526cb 2592 dev_put(dev);
1da177e4 2593
40d4e3df 2594 return len;
1da177e4
LT
2595
2596out_free:
2597 kfree_skb(skb);
2598out_unlock:
e40526cb 2599 if (dev)
1da177e4
LT
2600 dev_put(dev);
2601out:
2602 return err;
2603}
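
packet_snd() is the path behind a plain sendto() on a packet socket. On SOCK_DGRAM the kernel builds the link-layer header via dev_hard_header() from the sockaddr_ll we pass in, as sketched below; ifindex and the destination MAC are placeholders:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

int send_payload(int fd, int ifindex,
                 const unsigned char dst[ETH_ALEN],
                 const void *payload, size_t len)
{
        struct sockaddr_ll sll;

        memset(&sll, 0, sizeof(sll));
        sll.sll_family = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_IP);     /* EtherType to use */
        sll.sll_ifindex = ifindex;
        sll.sll_halen = ETH_ALEN;
        memcpy(sll.sll_addr, dst, ETH_ALEN);

        /* packet_snd() hands this address to dev_hard_header() */
        return sendto(fd, payload, len, 0,
                      (struct sockaddr *)&sll, sizeof(sll));
}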
2604
69e3c75f
JB
2605static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2606 struct msghdr *msg, size_t len)
2607{
69e3c75f
JB
2608 struct sock *sk = sock->sk;
2609 struct packet_sock *po = pkt_sk(sk);
d346a3fa 2610
69e3c75f
JB
2611 if (po->tx_ring.pg_vec)
2612 return tpacket_snd(po, msg);
2613 else
69e3c75f
JB
2614 return packet_snd(sock, msg, len);
2615}
2616
1da177e4
LT
2617/*
2618 * Close a PACKET socket. This is fairly simple. We immediately go
2619 * to 'closed' state and remove our protocol entry in the device list.
2620 */
2621
2622static int packet_release(struct socket *sock)
2623{
2624 struct sock *sk = sock->sk;
2625 struct packet_sock *po;
d12d01d6 2626 struct net *net;
f6fb8f10 2627 union tpacket_req_u req_u;
1da177e4
LT
2628
2629 if (!sk)
2630 return 0;
2631
3b1e0a65 2632 net = sock_net(sk);
1da177e4
LT
2633 po = pkt_sk(sk);
2634
0fa7fa98 2635 mutex_lock(&net->packet.sklist_lock);
808f5114 2636 sk_del_node_init_rcu(sk);
0fa7fa98
PE
2637 mutex_unlock(&net->packet.sklist_lock);
2638
2639 preempt_disable();
920de804 2640 sock_prot_inuse_add(net, sk->sk_prot, -1);
0fa7fa98 2641 preempt_enable();
1da177e4 2642
808f5114 2643 spin_lock(&po->bind_lock);
ce06b03e 2644 unregister_prot_hook(sk, false);
66e56cd4
DB
2645 packet_cached_dev_reset(po);
2646
160ff18a
BG
2647 if (po->prot_hook.dev) {
2648 dev_put(po->prot_hook.dev);
2649 po->prot_hook.dev = NULL;
2650 }
808f5114 2651 spin_unlock(&po->bind_lock);
1da177e4 2652
1da177e4 2653 packet_flush_mclist(sk);
1da177e4 2654
9665d5d6
PS
2655 if (po->rx_ring.pg_vec) {
2656 memset(&req_u, 0, sizeof(req_u));
f6fb8f10 2657 packet_set_ring(sk, &req_u, 1, 0);
9665d5d6 2658 }
69e3c75f 2659
9665d5d6
PS
2660 if (po->tx_ring.pg_vec) {
2661 memset(&req_u, 0, sizeof(req_u));
f6fb8f10 2662 packet_set_ring(sk, &req_u, 1, 1);
9665d5d6 2663 }
1da177e4 2664
dc99f600
DM
2665 fanout_release(sk);
2666
808f5114 2667 synchronize_net();
1da177e4
LT
2668 /*
2669 * Now the socket is dead. No more input will appear.
2670 */
1da177e4
LT
2671 sock_orphan(sk);
2672 sock->sk = NULL;
2673
2674 /* Purge queues */
2675
2676 skb_queue_purge(&sk->sk_receive_queue);
b0138408 2677 packet_free_pending(po);
17ab56a2 2678 sk_refcnt_debug_release(sk);
1da177e4
LT
2679
2680 sock_put(sk);
2681 return 0;
2682}
2683
2684/*
2685 * Attach a packet hook.
2686 */
2687
902fefb8 2688static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
1da177e4
LT
2689{
2690 struct packet_sock *po = pkt_sk(sk);
902fefb8
DB
2691 const struct net_device *dev_curr;
2692 __be16 proto_curr;
2693 bool need_rehook;
dc99f600 2694
aef950b4
WY
2695 if (po->fanout) {
2696 if (dev)
2697 dev_put(dev);
2698
dc99f600 2699 return -EINVAL;
aef950b4 2700 }
1da177e4
LT
2701
2702 lock_sock(sk);
1da177e4 2703 spin_lock(&po->bind_lock);
66e56cd4 2704
902fefb8
DB
2705 proto_curr = po->prot_hook.type;
2706 dev_curr = po->prot_hook.dev;
2707
2708 need_rehook = proto_curr != proto || dev_curr != dev;
2709
2710 if (need_rehook) {
2711 unregister_prot_hook(sk, true);
1da177e4 2712
902fefb8
DB
2713 po->num = proto;
2714 po->prot_hook.type = proto;
1da177e4 2715
902fefb8
DB
2716 if (po->prot_hook.dev)
2717 dev_put(po->prot_hook.dev);
2718
2719 po->prot_hook.dev = dev;
2720
2721 po->ifindex = dev ? dev->ifindex : 0;
2722 packet_cached_dev_assign(po, dev);
2723 }
66e56cd4 2724
902fefb8 2725 if (proto == 0 || !need_rehook)
1da177e4
LT
2726 goto out_unlock;
2727
be85d4ad 2728 if (!dev || (dev->flags & IFF_UP)) {
ce06b03e 2729 register_prot_hook(sk);
be85d4ad
UT
2730 } else {
2731 sk->sk_err = ENETDOWN;
2732 if (!sock_flag(sk, SOCK_DEAD))
2733 sk->sk_error_report(sk);
1da177e4
LT
2734 }
2735
2736out_unlock:
2737 spin_unlock(&po->bind_lock);
2738 release_sock(sk);
2739 return 0;
2740}
2741
2742/*
2743 * Bind a packet socket to a device
2744 */
2745
40d4e3df
ED
2746static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2747 int addr_len)
1da177e4 2748{
40d4e3df 2749 struct sock *sk = sock->sk;
1da177e4
LT
2750 char name[15];
2751 struct net_device *dev;
2752 int err = -ENODEV;
1ce4f28b 2753
1da177e4
LT
2754 /*
2755 * Check legality
2756 */
1ce4f28b 2757
8ae55f04 2758 if (addr_len != sizeof(struct sockaddr))
1da177e4 2759 return -EINVAL;
40d4e3df 2760 strlcpy(name, uaddr->sa_data, sizeof(name));
1da177e4 2761
3b1e0a65 2762 dev = dev_get_by_name(sock_net(sk), name);
160ff18a 2763 if (dev)
1da177e4 2764 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
1da177e4
LT
2765 return err;
2766}
1da177e4
LT
2767
2768static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2769{
40d4e3df
ED
2770 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2771 struct sock *sk = sock->sk;
1da177e4
LT
2772 struct net_device *dev = NULL;
2773 int err;
2774
2775
2776 /*
2777 * Check legality
2778 */
1ce4f28b 2779
1da177e4
LT
2780 if (addr_len < sizeof(struct sockaddr_ll))
2781 return -EINVAL;
2782 if (sll->sll_family != AF_PACKET)
2783 return -EINVAL;
2784
2785 if (sll->sll_ifindex) {
2786 err = -ENODEV;
3b1e0a65 2787 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
1da177e4
LT
2788 if (dev == NULL)
2789 goto out;
2790 }
2791 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
1da177e4
LT
2792
2793out:
2794 return err;
2795}
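
What reaches packet_bind() above is an ordinary bind() with a sockaddr_ll. A sketch; the interface name is an example:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>

int bind_to_iface(int fd, const char *ifname)
{
        struct sockaddr_ll sll;

        memset(&sll, 0, sizeof(sll));
        sll.sll_family = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_ALL);
        sll.sll_ifindex = if_nametoindex(ifname);
        if (!sll.sll_ifindex)
                return -1;

        return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}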
2796
2797static struct proto packet_proto = {
2798 .name = "PACKET",
2799 .owner = THIS_MODULE,
2800 .obj_size = sizeof(struct packet_sock),
2801};
2802
2803/*
1ce4f28b 2804 * Create a packet of type SOCK_PACKET.
1da177e4
LT
2805 */
2806
3f378b68
EP
2807static int packet_create(struct net *net, struct socket *sock, int protocol,
2808 int kern)
1da177e4
LT
2809{
2810 struct sock *sk;
2811 struct packet_sock *po;
0e11c91e 2812 __be16 proto = (__force __be16)protocol; /* weird, but documented */
1da177e4
LT
2813 int err;
2814
df008c91 2815 if (!ns_capable(net->user_ns, CAP_NET_RAW))
1da177e4 2816 return -EPERM;
be02097c
DM
2817 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2818 sock->type != SOCK_PACKET)
1da177e4
LT
2819 return -ESOCKTNOSUPPORT;
2820
2821 sock->state = SS_UNCONNECTED;
2822
2823 err = -ENOBUFS;
6257ff21 2824 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
1da177e4
LT
2825 if (sk == NULL)
2826 goto out;
2827
2828 sock->ops = &packet_ops;
1da177e4
LT
2829 if (sock->type == SOCK_PACKET)
2830 sock->ops = &packet_ops_spkt;
be02097c 2831
1da177e4
LT
2832 sock_init_data(sock, sk);
2833
2834 po = pkt_sk(sk);
2835 sk->sk_family = PF_PACKET;
0e11c91e 2836 po->num = proto;
d346a3fa 2837 po->xmit = dev_queue_xmit;
66e56cd4 2838
b0138408
DB
2839 err = packet_alloc_pending(po);
2840 if (err)
2841 goto out2;
2842
66e56cd4 2843 packet_cached_dev_reset(po);
1da177e4
LT
2844
2845 sk->sk_destruct = packet_sock_destruct;
17ab56a2 2846 sk_refcnt_debug_inc(sk);
1da177e4
LT
2847
2848 /*
2849 * Attach a protocol block
2850 */
2851
2852 spin_lock_init(&po->bind_lock);
905db440 2853 mutex_init(&po->pg_vec_lock);
1da177e4 2854 po->prot_hook.func = packet_rcv;
be02097c 2855
1da177e4
LT
2856 if (sock->type == SOCK_PACKET)
2857 po->prot_hook.func = packet_rcv_spkt;
be02097c 2858
1da177e4
LT
2859 po->prot_hook.af_packet_priv = sk;
2860
0e11c91e
AV
2861 if (proto) {
2862 po->prot_hook.type = proto;
ce06b03e 2863 register_prot_hook(sk);
1da177e4
LT
2864 }
2865
0fa7fa98 2866 mutex_lock(&net->packet.sklist_lock);
808f5114 2867 sk_add_node_rcu(sk, &net->packet.sklist);
0fa7fa98
PE
2868 mutex_unlock(&net->packet.sklist_lock);
2869
2870 preempt_disable();
3680453c 2871 sock_prot_inuse_add(net, &packet_proto, 1);
0fa7fa98 2872 preempt_enable();
808f5114 2873
40d4e3df 2874 return 0;
b0138408
DB
2875out2:
2876 sk_free(sk);
1da177e4
LT
2877out:
2878 return err;
2879}
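
Note that the protocol argument packet_create() casts to __be16 must already be in network byte order - the "weird, but documented" convention flagged above. From userspace (requires CAP_NET_RAW):

#include <sys/socket.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int open_packet_socket(void)
{
        /* SOCK_RAW delivers the link-layer header; SOCK_DGRAM strips it.
         * Passing 0 instead would create a socket that receives nothing
         * until bind() supplies a protocol. */
        return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}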
2880
2881/*
2882 * Pull a packet from our receive queue and hand it to the user.
2883 * If necessary we block.
2884 */
2885
2886static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2887 struct msghdr *msg, size_t len, int flags)
2888{
2889 struct sock *sk = sock->sk;
2890 struct sk_buff *skb;
2891 int copied, err;
bfd5f4a3 2892 int vnet_hdr_len = 0;
2472d761 2893 unsigned int origlen = 0;
1da177e4
LT
2894
2895 err = -EINVAL;
ed85b565 2896 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
1da177e4
LT
2897 goto out;
2898
2899#if 0
2900 /* What error should we return now? EUNATTACH? */
2901 if (pkt_sk(sk)->ifindex < 0)
2902 return -ENODEV;
2903#endif
2904
ed85b565 2905 if (flags & MSG_ERRQUEUE) {
cb820f8e
RC
2906 err = sock_recv_errqueue(sk, msg, len,
2907 SOL_PACKET, PACKET_TX_TIMESTAMP);
ed85b565
RC
2908 goto out;
2909 }
2910
1da177e4
LT
2911 /*
2912 * Call the generic datagram receiver. This handles all sorts
2913 * of horrible races and re-entrancy so we can forget about it
2914 * in the protocol layers.
2915 *
2916 * Now it will return ENETDOWN if the device has just gone down,
2917 * but then it will block.
2918 */
2919
40d4e3df 2920 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
1da177e4
LT
2921
2922 /*
1ce4f28b 2923 * An error occurred so return it. Because skb_recv_datagram()
1da177e4
LT
2924 * handles the blocking we don't see and worry about blocking
2925 * retries.
2926 */
2927
8ae55f04 2928 if (skb == NULL)
1da177e4
LT
2929 goto out;
2930
bfd5f4a3
SS
2931 if (pkt_sk(sk)->has_vnet_hdr) {
2932 struct virtio_net_hdr vnet_hdr = { 0 };
2933
2934 err = -EINVAL;
2935 vnet_hdr_len = sizeof(vnet_hdr);
1f18b717 2936 if (len < vnet_hdr_len)
bfd5f4a3
SS
2937 goto out_free;
2938
1f18b717
MK
2939 len -= vnet_hdr_len;
2940
bfd5f4a3
SS
2941 if (skb_is_gso(skb)) {
2942 struct skb_shared_info *sinfo = skb_shinfo(skb);
2943
2944 /* This is a hint as to how much should be linear. */
dc9e5153
MT
2945 vnet_hdr.hdr_len =
2946 __cpu_to_virtio16(false, skb_headlen(skb));
2947 vnet_hdr.gso_size =
2948 __cpu_to_virtio16(false, sinfo->gso_size);
bfd5f4a3
SS
2949 if (sinfo->gso_type & SKB_GSO_TCPV4)
2950 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2951 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2952 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2953 else if (sinfo->gso_type & SKB_GSO_UDP)
2954 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2955 else if (sinfo->gso_type & SKB_GSO_FCOE)
2956 goto out_free;
2957 else
2958 BUG();
2959 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2960 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2961 } else
2962 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2963
2964 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2965 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
dc9e5153
MT
2966 vnet_hdr.csum_start = __cpu_to_virtio16(false,
2967 skb_checksum_start_offset(skb));
2968 vnet_hdr.csum_offset = __cpu_to_virtio16(false,
2969 skb->csum_offset);
10a8d94a
JW
2970 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2971 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
bfd5f4a3
SS
2972 } /* else everything is zero */
2973
7eab8d9e 2974 err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
bfd5f4a3
SS
2975 if (err < 0)
2976 goto out_free;
2977 }
2978
f3d33426
HFS
2979 /* You lose any data beyond the buffer you gave. If this worries
2980 * a user program, it can ask the device for its MTU
2981 * anyway.
1da177e4 2982 */
1da177e4 2983 copied = skb->len;
40d4e3df
ED
2984 if (copied > len) {
2985 copied = len;
2986 msg->msg_flags |= MSG_TRUNC;
1da177e4
LT
2987 }
2988
51f3d02b 2989 err = skb_copy_datagram_msg(skb, 0, msg, copied);
1da177e4
LT
2990 if (err)
2991 goto out_free;
2992
2472d761
EB
2993 if (sock->type != SOCK_PACKET) {
2994 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2995
2996 /* Original length was stored in sockaddr_ll fields */
2997 origlen = PACKET_SKB_CB(skb)->sa.origlen;
2998 sll->sll_family = AF_PACKET;
2999 sll->sll_protocol = skb->protocol;
3000 }
3001
3b885787 3002 sock_recv_ts_and_drops(msg, sk, skb);
1da177e4 3003
f3d33426
HFS
3004 if (msg->msg_name) {
3005 /* If the address length field is there to be filled
3006 * in, we fill it in now.
3007 */
3008 if (sock->type == SOCK_PACKET) {
342dfc30 3009 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
f3d33426
HFS
3010 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3011 } else {
3012 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2472d761 3013
f3d33426
HFS
3014 msg->msg_namelen = sll->sll_halen +
3015 offsetof(struct sockaddr_ll, sll_addr);
3016 }
ffbc6111
HX
3017 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3018 msg->msg_namelen);
f3d33426 3019 }
1da177e4 3020
8dc41944 3021 if (pkt_sk(sk)->auxdata) {
ffbc6111
HX
3022 struct tpacket_auxdata aux;
3023
3024 aux.tp_status = TP_STATUS_USER;
3025 if (skb->ip_summed == CHECKSUM_PARTIAL)
3026 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2472d761 3027 aux.tp_len = origlen;
ffbc6111
HX
3028 aux.tp_snaplen = skb->len;
3029 aux.tp_mac = 0;
bbe735e4 3030 aux.tp_net = skb_network_offset(skb);
df8a39de
JP
3031 if (skb_vlan_tag_present(skb)) {
3032 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
a0cdfcf3
AW
3033 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3034 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
a3bcc23e
BG
3035 } else {
3036 aux.tp_vlan_tci = 0;
a0cdfcf3 3037 aux.tp_vlan_tpid = 0;
a3bcc23e 3038 }
ffbc6111 3039 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
8dc41944
HX
3040 }
3041
1da177e4
LT
3042 /*
3043 * Free or return the buffer as appropriate. Again this
3044 * hides all the races and re-entrancy issues from us.
3045 */
bfd5f4a3 3046 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
1da177e4
LT
3047
3048out_free:
3049 skb_free_datagram(sk, skb);
3050out:
3051 return err;
3052}
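
The tpacket_auxdata control message assembled near the end of packet_recvmsg() is consumed with the standard cmsg macros. A sketch (PACKET_AUXDATA enabled first; error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void recv_with_aux(int fd)
{
        unsigned char buf[2048];
        unsigned char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;
        int one = 1;

        setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));

        if (recvmsg(fd, &msg, 0) < 0)
                return;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_PACKET &&
                    cmsg->cmsg_type == PACKET_AUXDATA) {
                        struct tpacket_auxdata aux;

                        memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
                        printf("len=%u snaplen=%u vlan_tci=%u\n",
                               aux.tp_len, aux.tp_snaplen, aux.tp_vlan_tci);
                }
        }
}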
3053
1da177e4
LT
3054static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3055 int *uaddr_len, int peer)
3056{
3057 struct net_device *dev;
3058 struct sock *sk = sock->sk;
3059
3060 if (peer)
3061 return -EOPNOTSUPP;
3062
3063 uaddr->sa_family = AF_PACKET;
2dc85bf3 3064 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
654d1f8a
ED
3065 rcu_read_lock();
3066 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3067 if (dev)
2dc85bf3 3068 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
654d1f8a 3069 rcu_read_unlock();
1da177e4
LT
3070 *uaddr_len = sizeof(*uaddr);
3071
3072 return 0;
3073}
1da177e4
LT
3074
3075static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3076 int *uaddr_len, int peer)
3077{
3078 struct net_device *dev;
3079 struct sock *sk = sock->sk;
3080 struct packet_sock *po = pkt_sk(sk);
13cfa97b 3081 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
1da177e4
LT
3082
3083 if (peer)
3084 return -EOPNOTSUPP;
3085
3086 sll->sll_family = AF_PACKET;
3087 sll->sll_ifindex = po->ifindex;
3088 sll->sll_protocol = po->num;
67286640 3089 sll->sll_pkttype = 0;
654d1f8a
ED
3090 rcu_read_lock();
3091 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1da177e4
LT
3092 if (dev) {
3093 sll->sll_hatype = dev->type;
3094 sll->sll_halen = dev->addr_len;
3095 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
3096 } else {
3097 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3098 sll->sll_halen = 0;
3099 }
654d1f8a 3100 rcu_read_unlock();
0fb375fb 3101 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1da177e4
LT
3102
3103 return 0;
3104}
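
Userspace reaches packet_getname() through an ordinary getsockname(); a short sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>

void show_bound_ifindex(int fd)
{
        struct sockaddr_ll sll;
        socklen_t len = sizeof(sll);

        if (getsockname(fd, (struct sockaddr *)&sll, &len) == 0)
                printf("ifindex=%d proto=0x%04x\n",
                       sll.sll_ifindex, ntohs(sll.sll_protocol));
}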
3105
2aeb0b88
WC
3106static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3107 int what)
1da177e4
LT
3108{
3109 switch (i->type) {
3110 case PACKET_MR_MULTICAST:
1162563f
JP
3111 if (i->alen != dev->addr_len)
3112 return -EINVAL;
1da177e4 3113 if (what > 0)
22bedad3 3114 return dev_mc_add(dev, i->addr);
1da177e4 3115 else
22bedad3 3116 return dev_mc_del(dev, i->addr);
1da177e4
LT
3117 break;
3118 case PACKET_MR_PROMISC:
2aeb0b88 3119 return dev_set_promiscuity(dev, what);
1da177e4 3120 case PACKET_MR_ALLMULTI:
2aeb0b88 3121 return dev_set_allmulti(dev, what);
d95ed927 3122 case PACKET_MR_UNICAST:
1162563f
JP
3123 if (i->alen != dev->addr_len)
3124 return -EINVAL;
d95ed927 3125 if (what > 0)
a748ee24 3126 return dev_uc_add(dev, i->addr);
d95ed927 3127 else
a748ee24 3128 return dev_uc_del(dev, i->addr);
d95ed927 3129 break;
40d4e3df
ED
3130 default:
3131 break;
1da177e4 3132 }
2aeb0b88 3133 return 0;
1da177e4
LT
3134}
3135
3136static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
3137{
40d4e3df 3138 for ( ; i; i = i->next) {
1da177e4
LT
3139 if (i->ifindex == dev->ifindex)
3140 packet_dev_mc(dev, i, what);
3141 }
3142}
3143
0fb375fb 3144static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1da177e4
LT
3145{
3146 struct packet_sock *po = pkt_sk(sk);
3147 struct packet_mclist *ml, *i;
3148 struct net_device *dev;
3149 int err;
3150
3151 rtnl_lock();
3152
3153 err = -ENODEV;
3b1e0a65 3154 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1da177e4
LT
3155 if (!dev)
3156 goto done;
3157
3158 err = -EINVAL;
1162563f 3159 if (mreq->mr_alen > dev->addr_len)
1da177e4
LT
3160 goto done;
3161
3162 err = -ENOBUFS;
8b3a7005 3163 i = kmalloc(sizeof(*i), GFP_KERNEL);
1da177e4
LT
3164 if (i == NULL)
3165 goto done;
3166
3167 err = 0;
3168 for (ml = po->mclist; ml; ml = ml->next) {
3169 if (ml->ifindex == mreq->mr_ifindex &&
3170 ml->type == mreq->mr_type &&
3171 ml->alen == mreq->mr_alen &&
3172 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3173 ml->count++;
3174 /* Free the new element ... */
3175 kfree(i);
3176 goto done;
3177 }
3178 }
3179
3180 i->type = mreq->mr_type;
3181 i->ifindex = mreq->mr_ifindex;
3182 i->alen = mreq->mr_alen;
3183 memcpy(i->addr, mreq->mr_address, i->alen);
3184 i->count = 1;
3185 i->next = po->mclist;
3186 po->mclist = i;
2aeb0b88
WC
3187 err = packet_dev_mc(dev, i, 1);
3188 if (err) {
3189 po->mclist = i->next;
3190 kfree(i);
3191 }
1da177e4
LT
3192
3193done:
3194 rtnl_unlock();
3195 return err;
3196}
3197
0fb375fb 3198static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1da177e4
LT
3199{
3200 struct packet_mclist *ml, **mlp;
3201
3202 rtnl_lock();
3203
3204 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3205 if (ml->ifindex == mreq->mr_ifindex &&
3206 ml->type == mreq->mr_type &&
3207 ml->alen == mreq->mr_alen &&
3208 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3209 if (--ml->count == 0) {
3210 struct net_device *dev;
3211 *mlp = ml->next;
ad959e76
ED
3212 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3213 if (dev)
1da177e4 3214 packet_dev_mc(dev, ml, -1);
1da177e4
LT
3215 kfree(ml);
3216 }
3217 rtnl_unlock();
3218 return 0;
3219 }
3220 }
3221 rtnl_unlock();
3222 return -EADDRNOTAVAIL;
3223}
3224
3225static void packet_flush_mclist(struct sock *sk)
3226{
3227 struct packet_sock *po = pkt_sk(sk);
3228 struct packet_mclist *ml;
3229
3230 if (!po->mclist)
3231 return;
3232
3233 rtnl_lock();
3234 while ((ml = po->mclist) != NULL) {
3235 struct net_device *dev;
3236
3237 po->mclist = ml->next;
ad959e76
ED
3238 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3239 if (dev != NULL)
1da177e4 3240 packet_dev_mc(dev, ml, -1);
1da177e4
LT
3241 kfree(ml);
3242 }
3243 rtnl_unlock();
3244}
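
The packet_mc_add()/packet_dev_mc() path above is driven by PACKET_ADD_MEMBERSHIP. Enabling promiscuous mode this way, rather than via SIOCSIFFLAGS, gets the refcounting and the automatic cleanup on close performed by packet_flush_mclist(). A sketch:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

int enable_promisc(int fd, int ifindex)
{
        struct packet_mreq mreq;

        memset(&mreq, 0, sizeof(mreq));
        mreq.mr_ifindex = ifindex;
        mreq.mr_type = PACKET_MR_PROMISC;

        return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
                          &mreq, sizeof(mreq));
}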
1da177e4
LT
3245
3246static int
b7058842 3247packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1da177e4
LT
3248{
3249 struct sock *sk = sock->sk;
8dc41944 3250 struct packet_sock *po = pkt_sk(sk);
1da177e4
LT
3251 int ret;
3252
3253 if (level != SOL_PACKET)
3254 return -ENOPROTOOPT;
3255
69e3c75f 3256 switch (optname) {
1ce4f28b 3257 case PACKET_ADD_MEMBERSHIP:
1da177e4
LT
3258 case PACKET_DROP_MEMBERSHIP:
3259 {
0fb375fb
EB
3260 struct packet_mreq_max mreq;
3261 int len = optlen;
3262 memset(&mreq, 0, sizeof(mreq));
3263 if (len < sizeof(struct packet_mreq))
1da177e4 3264 return -EINVAL;
0fb375fb
EB
3265 if (len > sizeof(mreq))
3266 len = sizeof(mreq);
40d4e3df 3267 if (copy_from_user(&mreq, optval, len))
1da177e4 3268 return -EFAULT;
0fb375fb
EB
3269 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3270 return -EINVAL;
1da177e4
LT
3271 if (optname == PACKET_ADD_MEMBERSHIP)
3272 ret = packet_mc_add(sk, &mreq);
3273 else
3274 ret = packet_mc_drop(sk, &mreq);
3275 return ret;
3276 }
a2efcfa0 3277
1da177e4 3278 case PACKET_RX_RING:
69e3c75f 3279 case PACKET_TX_RING:
1da177e4 3280 {
f6fb8f10 3281 union tpacket_req_u req_u;
3282 int len;
1da177e4 3283
f6fb8f10 3284 switch (po->tp_version) {
3285 case TPACKET_V1:
3286 case TPACKET_V2:
3287 len = sizeof(req_u.req);
3288 break;
3289 case TPACKET_V3:
3290 default:
3291 len = sizeof(req_u.req3);
3292 break;
3293 }
3294 if (optlen < len)
1da177e4 3295 return -EINVAL;
bfd5f4a3
SS
3296 if (pkt_sk(sk)->has_vnet_hdr)
3297 return -EINVAL;
f6fb8f10 3298 if (copy_from_user(&req_u.req, optval, len))
1da177e4 3299 return -EFAULT;
f6fb8f10 3300 return packet_set_ring(sk, &req_u, 0,
3301 optname == PACKET_TX_RING);
1da177e4
LT
3302 }
3303 case PACKET_COPY_THRESH:
3304 {
3305 int val;
3306
40d4e3df 3307 if (optlen != sizeof(val))
1da177e4 3308 return -EINVAL;
40d4e3df 3309 if (copy_from_user(&val, optval, sizeof(val)))
1da177e4
LT
3310 return -EFAULT;
3311
3312 pkt_sk(sk)->copy_thresh = val;
3313 return 0;
3314 }
bbd6ef87
PM
3315 case PACKET_VERSION:
3316 {
3317 int val;
3318
3319 if (optlen != sizeof(val))
3320 return -EINVAL;
69e3c75f 3321 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
bbd6ef87
PM
3322 return -EBUSY;
3323 if (copy_from_user(&val, optval, sizeof(val)))
3324 return -EFAULT;
3325 switch (val) {
3326 case TPACKET_V1:
3327 case TPACKET_V2:
f6fb8f10 3328 case TPACKET_V3:
bbd6ef87
PM
3329 po->tp_version = val;
3330 return 0;
3331 default:
3332 return -EINVAL;
3333 }
3334 }
8913336a
PM
3335 case PACKET_RESERVE:
3336 {
3337 unsigned int val;
3338
3339 if (optlen != sizeof(val))
3340 return -EINVAL;
69e3c75f 3341 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
8913336a
PM
3342 return -EBUSY;
3343 if (copy_from_user(&val, optval, sizeof(val)))
3344 return -EFAULT;
3345 po->tp_reserve = val;
3346 return 0;
3347 }
69e3c75f
JB
3348 case PACKET_LOSS:
3349 {
3350 unsigned int val;
3351
3352 if (optlen != sizeof(val))
3353 return -EINVAL;
3354 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3355 return -EBUSY;
3356 if (copy_from_user(&val, optval, sizeof(val)))
3357 return -EFAULT;
3358 po->tp_loss = !!val;
3359 return 0;
3360 }
8dc41944
HX
3361 case PACKET_AUXDATA:
3362 {
3363 int val;
3364
3365 if (optlen < sizeof(val))
3366 return -EINVAL;
3367 if (copy_from_user(&val, optval, sizeof(val)))
3368 return -EFAULT;
3369
3370 po->auxdata = !!val;
3371 return 0;
3372 }
80feaacb
PWJ
3373 case PACKET_ORIGDEV:
3374 {
3375 int val;
3376
3377 if (optlen < sizeof(val))
3378 return -EINVAL;
3379 if (copy_from_user(&val, optval, sizeof(val)))
3380 return -EFAULT;
3381
3382 po->origdev = !!val;
3383 return 0;
3384 }
bfd5f4a3
SS
3385 case PACKET_VNET_HDR:
3386 {
3387 int val;
3388
3389 if (sock->type != SOCK_RAW)
3390 return -EINVAL;
3391 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3392 return -EBUSY;
3393 if (optlen < sizeof(val))
3394 return -EINVAL;
3395 if (copy_from_user(&val, optval, sizeof(val)))
3396 return -EFAULT;
3397
3398 po->has_vnet_hdr = !!val;
3399 return 0;
3400 }
614f60fa
SM
3401 case PACKET_TIMESTAMP:
3402 {
3403 int val;
3404
3405 if (optlen != sizeof(val))
3406 return -EINVAL;
3407 if (copy_from_user(&val, optval, sizeof(val)))
3408 return -EFAULT;
3409
3410 po->tp_tstamp = val;
3411 return 0;
3412 }
dc99f600
DM
3413 case PACKET_FANOUT:
3414 {
3415 int val;
3416
3417 if (optlen != sizeof(val))
3418 return -EINVAL;
3419 if (copy_from_user(&val, optval, sizeof(val)))
3420 return -EFAULT;
3421
3422 return fanout_add(sk, val & 0xffff, val >> 16);
3423 }
5920cd3a
PC
3424 case PACKET_TX_HAS_OFF:
3425 {
3426 unsigned int val;
3427
3428 if (optlen != sizeof(val))
3429 return -EINVAL;
3430 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3431 return -EBUSY;
3432 if (copy_from_user(&val, optval, sizeof(val)))
3433 return -EFAULT;
3434 po->tp_tx_has_off = !!val;
3435 return 0;
3436 }
d346a3fa
DB
3437 case PACKET_QDISC_BYPASS:
3438 {
3439 int val;
3440
3441 if (optlen != sizeof(val))
3442 return -EINVAL;
3443 if (copy_from_user(&val, optval, sizeof(val)))
3444 return -EFAULT;
3445
3446 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3447 return 0;
3448 }
1da177e4
LT
3449 default:
3450 return -ENOPROTOOPT;
3451 }
3452}
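
The -EBUSY checks in the PACKET_VERSION and PACKET_RESERVE cases above dictate the userspace setup order: pick the header version before creating the ring. A sketch with illustrative sizes (the caller must still compare mmap()'s result against MAP_FAILED):

#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>

void *setup_v2_rx_ring(int fd, struct tpacket_req *req)
{
        int ver = TPACKET_V2;

        if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)))
                return NULL;

        req->tp_block_size = 4096;      /* must be a multiple of PAGE_SIZE */
        req->tp_frame_size = 2048;      /* multiple of TPACKET_ALIGNMENT   */
        req->tp_block_nr = 64;
        req->tp_frame_nr = 64 * (4096 / 2048);

        if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)))
                return NULL;

        /* all blocks are mapped as one contiguous region */
        return mmap(NULL, (size_t)req->tp_block_nr * req->tp_block_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}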
3453
3454static int packet_getsockopt(struct socket *sock, int level, int optname,
3455 char __user *optval, int __user *optlen)
3456{
3457 int len;
c06fff6e 3458 int val, lv = sizeof(val);
1da177e4
LT
3459 struct sock *sk = sock->sk;
3460 struct packet_sock *po = pkt_sk(sk);
c06fff6e 3461 void *data = &val;
ee80fbf3 3462 union tpacket_stats_u st;
1da177e4
LT
3463
3464 if (level != SOL_PACKET)
3465 return -ENOPROTOOPT;
3466
8ae55f04
KK
3467 if (get_user(len, optlen))
3468 return -EFAULT;
1da177e4
LT
3469
3470 if (len < 0)
3471 return -EINVAL;
1ce4f28b 3472
69e3c75f 3473 switch (optname) {
1da177e4 3474 case PACKET_STATISTICS:
1da177e4 3475 spin_lock_bh(&sk->sk_receive_queue.lock);
ee80fbf3
DB
3476 memcpy(&st, &po->stats, sizeof(st));
3477 memset(&po->stats, 0, sizeof(po->stats));
3478 spin_unlock_bh(&sk->sk_receive_queue.lock);
3479
f6fb8f10 3480 if (po->tp_version == TPACKET_V3) {
c06fff6e 3481 lv = sizeof(struct tpacket_stats_v3);
8bcdeaff 3482 st.stats3.tp_packets += st.stats3.tp_drops;
ee80fbf3 3483 data = &st.stats3;
f6fb8f10 3484 } else {
c06fff6e 3485 lv = sizeof(struct tpacket_stats);
8bcdeaff 3486 st.stats1.tp_packets += st.stats1.tp_drops;
ee80fbf3 3487 data = &st.stats1;
f6fb8f10 3488 }
ee80fbf3 3489
8dc41944
HX
3490 break;
3491 case PACKET_AUXDATA:
8dc41944 3492 val = po->auxdata;
80feaacb
PWJ
3493 break;
3494 case PACKET_ORIGDEV:
80feaacb 3495 val = po->origdev;
bfd5f4a3
SS
3496 break;
3497 case PACKET_VNET_HDR:
bfd5f4a3 3498 val = po->has_vnet_hdr;
1da177e4 3499 break;
bbd6ef87 3500 case PACKET_VERSION:
bbd6ef87 3501 val = po->tp_version;
bbd6ef87
PM
3502 break;
3503 case PACKET_HDRLEN:
3504 if (len > sizeof(int))
3505 len = sizeof(int);
3506 if (copy_from_user(&val, optval, len))
3507 return -EFAULT;
3508 switch (val) {
3509 case TPACKET_V1:
3510 val = sizeof(struct tpacket_hdr);
3511 break;
3512 case TPACKET_V2:
3513 val = sizeof(struct tpacket2_hdr);
3514 break;
f6fb8f10 3515 case TPACKET_V3:
3516 val = sizeof(struct tpacket3_hdr);
3517 break;
bbd6ef87
PM
3518 default:
3519 return -EINVAL;
3520 }
bbd6ef87 3521 break;
8913336a 3522 case PACKET_RESERVE:
8913336a 3523 val = po->tp_reserve;
8913336a 3524 break;
69e3c75f 3525 case PACKET_LOSS:
69e3c75f 3526 val = po->tp_loss;
69e3c75f 3527 break;
614f60fa 3528 case PACKET_TIMESTAMP:
614f60fa 3529 val = po->tp_tstamp;
614f60fa 3530 break;
dc99f600 3531 case PACKET_FANOUT:
dc99f600
DM
3532 val = (po->fanout ?
3533 ((u32)po->fanout->id |
77f65ebd
WB
3534 ((u32)po->fanout->type << 16) |
3535 ((u32)po->fanout->flags << 24)) :
dc99f600 3536 0);
dc99f600 3537 break;
5920cd3a
PC
3538 case PACKET_TX_HAS_OFF:
3539 val = po->tp_tx_has_off;
3540 break;
d346a3fa
DB
3541 case PACKET_QDISC_BYPASS:
3542 val = packet_use_direct_xmit(po);
3543 break;
1da177e4
LT
3544 default:
3545 return -ENOPROTOOPT;
3546 }
3547
c06fff6e
ED
3548 if (len > lv)
3549 len = lv;
8ae55f04
KK
3550 if (put_user(len, optlen))
3551 return -EFAULT;
8dc41944
HX
3552 if (copy_to_user(optval, data, len))
3553 return -EFAULT;
8ae55f04 3554 return 0;
1da177e4
LT
3555}
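
PACKET_STATISTICS above is clear-on-read, so each getsockopt() call returns the counts accumulated since the previous one. For TPACKET_V1/V2 sockets (V3 would use struct tpacket_stats_v3):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

void print_stats(int fd)
{
        struct tpacket_stats st;
        socklen_t len = sizeof(st);

        if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
                printf("packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
}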
3556
3557
351638e7
JP
3558static int packet_notifier(struct notifier_block *this,
3559 unsigned long msg, void *ptr)
1da177e4
LT
3560{
3561 struct sock *sk;
351638e7 3562 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
c346dca1 3563 struct net *net = dev_net(dev);
1da177e4 3564
808f5114 3565 rcu_read_lock();
b67bfe0d 3566 sk_for_each_rcu(sk, &net->packet.sklist) {
1da177e4
LT
3567 struct packet_sock *po = pkt_sk(sk);
3568
3569 switch (msg) {
3570 case NETDEV_UNREGISTER:
1da177e4
LT
3571 if (po->mclist)
3572 packet_dev_mclist(dev, po->mclist, -1);
a2efcfa0
DM
3573 /* fallthrough */
3574
1da177e4
LT
3575 case NETDEV_DOWN:
3576 if (dev->ifindex == po->ifindex) {
3577 spin_lock(&po->bind_lock);
3578 if (po->running) {
ce06b03e 3579 __unregister_prot_hook(sk, false);
1da177e4
LT
3580 sk->sk_err = ENETDOWN;
3581 if (!sock_flag(sk, SOCK_DEAD))
3582 sk->sk_error_report(sk);
3583 }
3584 if (msg == NETDEV_UNREGISTER) {
66e56cd4 3585 packet_cached_dev_reset(po);
1da177e4 3586 po->ifindex = -1;
160ff18a
BG
3587 if (po->prot_hook.dev)
3588 dev_put(po->prot_hook.dev);
1da177e4
LT
3589 po->prot_hook.dev = NULL;
3590 }
3591 spin_unlock(&po->bind_lock);
3592 }
3593 break;
3594 case NETDEV_UP:
808f5114 3595 if (dev->ifindex == po->ifindex) {
3596 spin_lock(&po->bind_lock);
ce06b03e
DM
3597 if (po->num)
3598 register_prot_hook(sk);
808f5114 3599 spin_unlock(&po->bind_lock);
1da177e4 3600 }
1da177e4
LT
3601 break;
3602 }
3603 }
808f5114 3604 rcu_read_unlock();
1da177e4
LT
3605 return NOTIFY_DONE;
3606}
3607
3608
3609static int packet_ioctl(struct socket *sock, unsigned int cmd,
3610 unsigned long arg)
3611{
3612 struct sock *sk = sock->sk;
3613
69e3c75f 3614 switch (cmd) {
40d4e3df
ED
3615 case SIOCOUTQ:
3616 {
3617 int amount = sk_wmem_alloc_get(sk);
31e6d363 3618
40d4e3df
ED
3619 return put_user(amount, (int __user *)arg);
3620 }
3621 case SIOCINQ:
3622 {
3623 struct sk_buff *skb;
3624 int amount = 0;
3625
3626 spin_lock_bh(&sk->sk_receive_queue.lock);
3627 skb = skb_peek(&sk->sk_receive_queue);
3628 if (skb)
3629 amount = skb->len;
3630 spin_unlock_bh(&sk->sk_receive_queue.lock);
3631 return put_user(amount, (int __user *)arg);
3632 }
3633 case SIOCGSTAMP:
3634 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3635 case SIOCGSTAMPNS:
3636 return sock_get_timestampns(sk, (struct timespec __user *)arg);
1ce4f28b 3637
1da177e4 3638#ifdef CONFIG_INET
40d4e3df
ED
3639 case SIOCADDRT:
3640 case SIOCDELRT:
3641 case SIOCDARP:
3642 case SIOCGARP:
3643 case SIOCSARP:
3644 case SIOCGIFADDR:
3645 case SIOCSIFADDR:
3646 case SIOCGIFBRDADDR:
3647 case SIOCSIFBRDADDR:
3648 case SIOCGIFNETMASK:
3649 case SIOCSIFNETMASK:
3650 case SIOCGIFDSTADDR:
3651 case SIOCSIFDSTADDR:
3652 case SIOCSIFFLAGS:
40d4e3df 3653 return inet_dgram_ops.ioctl(sock, cmd, arg);
1da177e4
LT
3654#endif
3655
40d4e3df
ED
3656 default:
3657 return -ENOIOCTLCMD;
1da177e4
LT
3658 }
3659 return 0;
3660}
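
SIOCINQ and SIOCOUTQ above map to plain ioctl() calls; a sketch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

void show_queue_depths(int fd)
{
        int inq = 0, outq = 0;

        ioctl(fd, SIOCINQ, &inq);       /* length of the next queued packet */
        ioctl(fd, SIOCOUTQ, &outq);     /* bytes not yet sent */
        printf("inq=%d outq=%d\n", inq, outq);
}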
3661
40d4e3df 3662static unsigned int packet_poll(struct file *file, struct socket *sock,
1da177e4
LT
3663 poll_table *wait)
3664{
3665 struct sock *sk = sock->sk;
3666 struct packet_sock *po = pkt_sk(sk);
3667 unsigned int mask = datagram_poll(file, sock, wait);
3668
3669 spin_lock_bh(&sk->sk_receive_queue.lock);
69e3c75f 3670 if (po->rx_ring.pg_vec) {
f6fb8f10 3671 if (!packet_previous_rx_frame(po, &po->rx_ring,
3672 TP_STATUS_KERNEL))
1da177e4
LT
3673 mask |= POLLIN | POLLRDNORM;
3674 }
3675 spin_unlock_bh(&sk->sk_receive_queue.lock);
69e3c75f
JB
3676 spin_lock_bh(&sk->sk_write_queue.lock);
3677 if (po->tx_ring.pg_vec) {
3678 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3679 mask |= POLLOUT | POLLWRNORM;
3680 }
3681 spin_unlock_bh(&sk->sk_write_queue.lock);
1da177e4
LT
3682 return mask;
3683}
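
packet_poll() reports POLLIN when the next rx frame is user-owned and POLLOUT when a tx slot is available, so a ring consumer typically blocks like this:

#include <poll.h>

int wait_for_frame(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };

        return poll(&pfd, 1, -1);       /* block until a frame is ready */
}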
3684
3685
3686 /* Dirty? Well, I still have not learned a better way to account
3687 * for user mmaps.
3688 */
3689
3690static void packet_mm_open(struct vm_area_struct *vma)
3691{
3692 struct file *file = vma->vm_file;
40d4e3df 3693 struct socket *sock = file->private_data;
1da177e4 3694 struct sock *sk = sock->sk;
1ce4f28b 3695
1da177e4
LT
3696 if (sk)
3697 atomic_inc(&pkt_sk(sk)->mapped);
3698}
3699
3700static void packet_mm_close(struct vm_area_struct *vma)
3701{
3702 struct file *file = vma->vm_file;
40d4e3df 3703 struct socket *sock = file->private_data;
1da177e4 3704 struct sock *sk = sock->sk;
1ce4f28b 3705
1da177e4
LT
3706 if (sk)
3707 atomic_dec(&pkt_sk(sk)->mapped);
3708}
3709
f0f37e2f 3710static const struct vm_operations_struct packet_mmap_ops = {
40d4e3df
ED
3711 .open = packet_mm_open,
3712 .close = packet_mm_close,
1da177e4
LT
3713};
3714
0e3125c7
NH
3715static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3716 unsigned int len)
1da177e4
LT
3717{
3718 int i;
3719
4ebf0ae2 3720 for (i = 0; i < len; i++) {
0e3125c7 3721 if (likely(pg_vec[i].buffer)) {
c56b4d90 3722 if (is_vmalloc_addr(pg_vec[i].buffer))
0e3125c7
NH
3723 vfree(pg_vec[i].buffer);
3724 else
3725 free_pages((unsigned long)pg_vec[i].buffer,
3726 order);
3727 pg_vec[i].buffer = NULL;
3728 }
1da177e4
LT
3729 }
3730 kfree(pg_vec);
3731}

static char *alloc_one_pg_vec_page(unsigned long order)
{
        char *buffer;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
                          __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

        buffer = (char *) __get_free_pages(gfp_flags, order);
        if (buffer)
                return buffer;

        /* __get_free_pages failed, fall back to vmalloc */
        buffer = vzalloc((1 << order) * PAGE_SIZE);
        if (buffer)
                return buffer;

        /* vmalloc failed, let's dig into swap here */
        gfp_flags &= ~__GFP_NORETRY;
        buffer = (char *) __get_free_pages(gfp_flags, order);
        if (buffer)
                return buffer;

        /* complete and utter failure */
        return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
        unsigned int block_nr = req->tp_block_nr;
        struct pgv *pg_vec;
        int i;

        pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
        if (unlikely(!pg_vec))
                goto out;

        for (i = 0; i < block_nr; i++) {
                pg_vec[i].buffer = alloc_one_pg_vec_page(order);
                if (unlikely(!pg_vec[i].buffer))
                        goto out_free_pgvec;
        }

out:
        return pg_vec;

out_free_pgvec:
        free_pg_vec(pg_vec, order, block_nr);
        pg_vec = NULL;
        goto out;
}

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                int closing, int tx_ring)
{
        struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
        int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
        __be16 num;
        int err = -EINVAL;
        /* Alias to keep code churn minimal */
        struct tpacket_req *req = &req_u->req;

        /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
        if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
                WARN(1, "Tx-ring is not supported.\n");
                goto out;
        }

        rb = tx_ring ? &po->tx_ring : &po->rx_ring;
        rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

        err = -EBUSY;
        if (!closing) {
                if (atomic_read(&po->mapped))
                        goto out;
                if (packet_read_pending(rb))
                        goto out;
        }

        if (req->tp_block_nr) {
                /* Sanity tests and some calculations */
                err = -EBUSY;
                if (unlikely(rb->pg_vec))
                        goto out;

                switch (po->tp_version) {
                case TPACKET_V1:
                        po->tp_hdrlen = TPACKET_HDRLEN;
                        break;
                case TPACKET_V2:
                        po->tp_hdrlen = TPACKET2_HDRLEN;
                        break;
                case TPACKET_V3:
                        po->tp_hdrlen = TPACKET3_HDRLEN;
                        break;
                }

                err = -EINVAL;
                if (unlikely((int)req->tp_block_size <= 0))
                        goto out;
                if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
                        goto out;
                if (po->tp_version >= TPACKET_V3 &&
                    (int)(req->tp_block_size -
                          BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
                        goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                        po->tp_reserve))
                        goto out;
                if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
                        goto out;

                rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
                if (unlikely(rb->frames_per_block <= 0))
                        goto out;
                if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
                                        req->tp_frame_nr))
                        goto out;

                err = -ENOMEM;
                order = get_order(req->tp_block_size);
                pg_vec = alloc_pg_vec(req, order);
                if (unlikely(!pg_vec))
                        goto out;
                switch (po->tp_version) {
                case TPACKET_V3:
                        /* Transmit path is not supported. We checked
                         * it above, but check again to be safe.
                         */
                        if (!tx_ring)
                                init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
                        break;
                default:
                        break;
                }
        }
        /* Done */
        else {
                err = -EINVAL;
                if (unlikely(req->tp_frame_nr))
                        goto out;
        }

        lock_sock(sk);

        /* Detach socket from network */
        spin_lock(&po->bind_lock);
        was_running = po->running;
        num = po->num;
        if (was_running) {
                po->num = 0;
                __unregister_prot_hook(sk, false);
        }
        spin_unlock(&po->bind_lock);

        synchronize_net();

        err = -EBUSY;
        mutex_lock(&po->pg_vec_lock);
        if (closing || atomic_read(&po->mapped) == 0) {
                err = 0;
                spin_lock_bh(&rb_queue->lock);
                swap(rb->pg_vec, pg_vec);
                rb->frame_max = (req->tp_frame_nr - 1);
                rb->head = 0;
                rb->frame_size = req->tp_frame_size;
                spin_unlock_bh(&rb_queue->lock);

                swap(rb->pg_vec_order, order);
                swap(rb->pg_vec_len, req->tp_block_nr);

                rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
                po->prot_hook.func = (po->rx_ring.pg_vec) ?
                                                tpacket_rcv : packet_rcv;
                skb_queue_purge(rb_queue);
                if (atomic_read(&po->mapped))
                        pr_err("packet_mmap: vma is busy: %d\n",
                               atomic_read(&po->mapped));
        }
        mutex_unlock(&po->pg_vec_lock);

        spin_lock(&po->bind_lock);
        if (was_running) {
                po->num = num;
                register_prot_hook(sk);
        }
        spin_unlock(&po->bind_lock);
        if (closing && (po->tp_version > TPACKET_V2)) {
                /* Because we don't support block-based V3 on the tx-ring */
                if (!tx_ring)
                        prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
        }
        release_sock(sk);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
        return err;
}

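/*
 * A user-space sketch (guarded by #if 0, not built here) of a
 * tpacket_req that passes the sanity checks in packet_set_ring()
 * above: the block size is page-aligned, the frame size is
 * TPACKET_ALIGNMENT-aligned and at least tp_hdrlen + tp_reserve, and
 * tp_frame_nr equals frames-per-block times tp_block_nr. The 4 KiB
 * block size assumes 4 KiB pages.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int setup_rx_ring(int fd)
{
        int version = TPACKET_V2;
        struct tpacket_req req;

        memset(&req, 0, sizeof(req));
        req.tp_block_size = 4096;       /* multiple of PAGE_SIZE */
        req.tp_frame_size = 2048;       /* multiple of TPACKET_ALIGNMENT */
        req.tp_block_nr   = 64;
        req.tp_frame_nr   = (req.tp_block_size / req.tp_frame_size) *
                            req.tp_block_nr;    /* 2 * 64 = 128 */

        if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
                       &version, sizeof(version)) < 0)
                return -1;
        return setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
                          &req, sizeof(req));
}
#endif
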
static int packet_mmap(struct file *file, struct socket *sock,
                struct vm_area_struct *vma)
{
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
        unsigned long size, expected_size;
        struct packet_ring_buffer *rb;
        unsigned long start;
        int err = -EINVAL;
        int i;

        if (vma->vm_pgoff)
                return -EINVAL;

        mutex_lock(&po->pg_vec_lock);

        expected_size = 0;
        for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
                if (rb->pg_vec) {
                        expected_size += rb->pg_vec_len
                                                * rb->pg_vec_pages
                                                * PAGE_SIZE;
                }
        }

        if (expected_size == 0)
                goto out;

        size = vma->vm_end - vma->vm_start;
        if (size != expected_size)
                goto out;

        start = vma->vm_start;
        for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
                if (rb->pg_vec == NULL)
                        continue;

                for (i = 0; i < rb->pg_vec_len; i++) {
                        struct page *page;
                        void *kaddr = rb->pg_vec[i].buffer;
                        int pg_num;

                        for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
                                page = pgv_to_page(kaddr);
                                err = vm_insert_page(vma, start, page);
                                if (unlikely(err))
                                        goto out;
                                start += PAGE_SIZE;
                                kaddr += PAGE_SIZE;
                        }
                }
        }

        atomic_inc(&po->mapped);
        vma->vm_ops = &packet_mmap_ops;
        err = 0;

out:
        mutex_unlock(&po->pg_vec_lock);
        return err;
}

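/*
 * A user-space sketch (guarded by #if 0, not built here) of consuming
 * the mapping created by packet_mmap() above. The length must equal
 * the combined size of all configured rings and the offset must be
 * zero; the frame layout shown assumes TPACKET_V2.
 */
#if 0
#include <linux/if_packet.h>
#include <sys/mman.h>

static void *map_ring(int fd, size_t ring_bytes)
{
        /* rx and tx rings are mapped back to back in a single call */
        return mmap(NULL, ring_bytes, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, 0);
}

static void drain_one_frame(void *frame)
{
        struct tpacket2_hdr *hdr = frame;

        if (hdr->tp_status & TP_STATUS_USER) {
                /* consume hdr->tp_len bytes at frame + hdr->tp_mac ... */
                hdr->tp_status = TP_STATUS_KERNEL;      /* hand back to the kernel */
        }
}
#endif
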
static const struct proto_ops packet_ops_spkt = {
        .family =       PF_PACKET,
        .owner =        THIS_MODULE,
        .release =      packet_release,
        .bind =         packet_bind_spkt,
        .connect =      sock_no_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname_spkt,
        .poll =         datagram_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      packet_sendmsg_spkt,
        .recvmsg =      packet_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
        .family =       PF_PACKET,
        .owner =        THIS_MODULE,
        .release =      packet_release,
        .bind =         packet_bind,
        .connect =      sock_no_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname,
        .poll =         packet_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .setsockopt =   packet_setsockopt,
        .getsockopt =   packet_getsockopt,
        .sendmsg =      packet_sendmsg,
        .recvmsg =      packet_recvmsg,
        .mmap =         packet_mmap,
        .sendpage =     sock_no_sendpage,
};
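
/*
 * A user-space sketch (guarded by #if 0, not built here) of creating
 * the two socket flavours these proto_ops tables serve: SOCK_PACKET
 * ends up on packet_ops_spkt, while SOCK_RAW/SOCK_DGRAM AF_PACKET
 * sockets use packet_ops. Requires CAP_NET_RAW.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_ether.h>     /* ETH_P_ALL */
#include <arpa/inet.h>          /* htons */

static int open_packet_socket(void)
{
        /* link-layer frames, full headers included */
        return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}
#endif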

static const struct net_proto_family packet_family_ops = {
        .family =       PF_PACKET,
        .create =       packet_create,
        .owner  =       THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
        .notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct net *net = seq_file_net(seq);

        rcu_read_lock();
        return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
        else {
                struct sock *s = sk_entry(v);
                const struct packet_sock *po = pkt_sk(s);

                seq_printf(seq,
                           "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
                           s,
                           atomic_read(&s->sk_refcnt),
                           s->sk_type,
                           ntohs(po->num),
                           po->ifindex,
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
                           from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
                           sock_i_ino(s));
        }

        return 0;
}

static const struct seq_operations packet_seq_ops = {
        .start  = packet_seq_start,
        .next   = packet_seq_next,
        .stop   = packet_seq_stop,
        .show   = packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &packet_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = packet_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
};

#endif
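
/*
 * A user-space sketch (guarded by #if 0, not built here): the seq_file
 * handlers above back /proc/net/packet, which prints one line per
 * packet socket in the caller's netns in the column order emitted by
 * packet_seq_show().
 */
#if 0
#include <stdio.h>

static void dump_packet_sockets(void)
{
        FILE *f = fopen("/proc/net/packet", "r");
        char line[256];

        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* sk RefCnt Type Proto Iface ... */
        fclose(f);
}
#endif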

static int __net_init packet_net_init(struct net *net)
{
        mutex_init(&net->packet.sklist_lock);
        INIT_HLIST_HEAD(&net->packet.sklist);

        if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
                return -ENOMEM;

        return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
        remove_proc_entry("packet", net->proc_net);
}

static struct pernet_operations packet_net_ops = {
        .init = packet_net_init,
        .exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
        unregister_netdevice_notifier(&packet_netdev_notifier);
        unregister_pernet_subsys(&packet_net_ops);
        sock_unregister(PF_PACKET);
        proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
        int rc = proto_register(&packet_proto, 0);

        if (rc != 0)
                goto out;

        sock_register(&packet_family_ops);
        register_pernet_subsys(&packet_net_ops);
        register_netdevice_notifier(&packet_netdev_notifier);
out:
        return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);