net/packet/af_packet.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * PACKET - implements raw packet sockets.
7 *
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 *
12 * Fixes:
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
41 * and packet_mreq.
42 * Johann Baudy : Added TX RING.
43 * Chetan Loke : Implemented TPACKET_V3 block abstraction
44 * layer.
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
46 *
47 *
48 * This program is free software; you can redistribute it and/or
49 * modify it under the terms of the GNU General Public License
50 * as published by the Free Software Foundation; either version
51 * 2 of the License, or (at your option) any later version.
52 *
53 */
54
55 #include <linux/types.h>
56 #include <linux/mm.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
70 #include <net/ip.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
73 #include <net/sock.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <asm/uaccess.h>
77 #include <asm/ioctls.h>
78 #include <asm/page.h>
79 #include <asm/cacheflush.h>
80 #include <asm/io.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/poll.h>
84 #include <linux/module.h>
85 #include <linux/init.h>
86 #include <linux/mutex.h>
87 #include <linux/if_vlan.h>
88 #include <linux/virtio_net.h>
89 #include <linux/errqueue.h>
90 #include <linux/net_tstamp.h>
91 #include <linux/percpu.h>
92 #ifdef CONFIG_INET
93 #include <net/inet_common.h>
94 #endif
95 #include <linux/bpf.h>
96 #include <net/compat.h>
97
98 #include "internal.h"
99
100 /*
101 Assumptions:
102 - if device has no dev->hard_header routine, it adds and removes ll header
103 inside itself. In this case ll header is invisible outside of device,
104 but higher levels still should reserve dev->hard_header_len.
105 Some devices are clever enough to reallocate the skb when the
106 header will not fit in the reserved space (tunnel); other ones are not
107 (PPP).
108 - packet socket receives packets with pulled ll header,
109 so that SOCK_RAW should push it back.
110
111 On receive:
112 -----------
113
114 Incoming, dev->hard_header!=NULL
115 mac_header -> ll header
116 data -> data
117
118 Outgoing, dev->hard_header!=NULL
119 mac_header -> ll header
120 data -> ll header
121
122 Incoming, dev->hard_header==NULL
123 mac_header -> UNKNOWN position. It very likely points to the ll
124 header. PPP does this, which is wrong, because it introduces
125 asymmetry between the rx and tx paths.
126 data -> data
127
128 Outgoing, dev->hard_header==NULL
129 mac_header -> data. ll header is still not built!
130 data -> data
131
132 In summary:
133 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
134
135
136 On transmit:
137 ------------
138
139 dev->hard_header != NULL
140 mac_header -> ll header
141 data -> ll header
142
143 dev->hard_header == NULL (ll header is added by device, we cannot control it)
144 mac_header -> data
145 data -> data
146
147 We should set nh.raw on output to the correct position;
148 the packet classifier depends on it.
149 */
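/*
 * Illustrative userspace sketch (not part of the original file): the
 * mac_header/data layout described above is what applications see.  With
 * SOCK_RAW the caller receives and must transmit the full link-layer
 * header; with SOCK_DGRAM the kernel adds/strips it and reports the
 * addresses through sockaddr_ll.  A minimal, hedged sketch, assuming an
 * Ethernet device and CAP_NET_RAW:
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 *	struct sockaddr_ll sll;
 *	socklen_t slen = sizeof(sll);
 *	char buf[2048];
 *
 *	recvfrom(raw, buf, sizeof(buf), 0, NULL, NULL);
 *	//   -> buf starts at the Ethernet header (mac_header == data)
 *	recvfrom(dgm, buf, sizeof(buf), 0, (struct sockaddr *)&sll, &slen);
 *	//   -> buf starts at the network header, the MAC is in sll.sll_addr
 */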
150
151 /* Private packet socket structures. */
152
153 /* identical to struct packet_mreq except it has
154 * a longer address field.
155 */
156 struct packet_mreq_max {
157 int mr_ifindex;
158 unsigned short mr_type;
159 unsigned short mr_alen;
160 unsigned char mr_address[MAX_ADDR_LEN];
161 };
162
163 union tpacket_uhdr {
164 struct tpacket_hdr *h1;
165 struct tpacket2_hdr *h2;
166 struct tpacket3_hdr *h3;
167 void *raw;
168 };
169
170 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
171 int closing, int tx_ring);
172
173 #define V3_ALIGNMENT (8)
174
175 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
176
177 #define BLK_PLUS_PRIV(sz_of_priv) \
178 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
179
180 #define PGV_FROM_VMALLOC 1
181
182 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
183 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
184 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
185 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
186 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
187 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
188 #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
189
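/*
 * Illustrative userspace sketch (not part of the original file): the block
 * descriptor fields accessed via the BLOCK_* macros above belong to a
 * TPACKET_V3 ring that userspace maps with mmap().  A hedged sketch of
 * setting such a ring up (error handling omitted):
 *
 *	#include <linux/if_packet.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 20,              // 1 MiB per block
 *		.tp_block_nr       = 8,
 *		.tp_frame_size     = 2048,                 // still required by the ABI
 *		.tp_frame_nr       = (1 << 20) / 2048 * 8,
 *		.tp_retire_blk_tov = 60,                   // ms; 0 lets the kernel derive it
 *		.tp_sizeof_priv    = 0,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */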
190 struct packet_sock;
191 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
192 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
193 struct packet_type *pt, struct net_device *orig_dev);
194
195 static void *packet_previous_frame(struct packet_sock *po,
196 struct packet_ring_buffer *rb,
197 int status);
198 static void packet_increment_head(struct packet_ring_buffer *buff);
199 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
200 struct tpacket_block_desc *);
201 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
202 struct packet_sock *);
203 static void prb_retire_current_block(struct tpacket_kbdq_core *,
204 struct packet_sock *, unsigned int status);
205 static int prb_queue_frozen(struct tpacket_kbdq_core *);
206 static void prb_open_block(struct tpacket_kbdq_core *,
207 struct tpacket_block_desc *);
208 static void prb_retire_rx_blk_timer_expired(unsigned long);
209 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
210 static void prb_init_blk_timer(struct packet_sock *,
211 struct tpacket_kbdq_core *,
212 void (*func) (unsigned long));
213 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
214 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
215 struct tpacket3_hdr *);
216 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
217 struct tpacket3_hdr *);
218 static void packet_flush_mclist(struct sock *sk);
219
220 struct packet_skb_cb {
221 union {
222 struct sockaddr_pkt pkt;
223 union {
224 /* Trick: alias the original skb length with
225 * ll.sll_family and ll.sll_protocol in order
226 * to save room.
227 */
228 unsigned int origlen;
229 struct sockaddr_ll ll;
230 };
231 } sa;
232 };
233
234 #define vio_le() virtio_legacy_is_little_endian()
235
236 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
237
238 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
239 #define GET_PBLOCK_DESC(x, bid) \
240 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
241 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
242 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
243 #define GET_NEXT_PRB_BLK_NUM(x) \
244 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
245 ((x)->kactive_blk_num+1) : 0)
246
247 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
248 static void __fanout_link(struct sock *sk, struct packet_sock *po);
249
250 static int packet_direct_xmit(struct sk_buff *skb)
251 {
252 struct net_device *dev = skb->dev;
253 struct sk_buff *orig_skb = skb;
254 struct netdev_queue *txq;
255 int ret = NETDEV_TX_BUSY;
256
257 if (unlikely(!netif_running(dev) ||
258 !netif_carrier_ok(dev)))
259 goto drop;
260
261 skb = validate_xmit_skb_list(skb, dev);
262 if (skb != orig_skb)
263 goto drop;
264
265 txq = skb_get_tx_queue(dev, skb);
266
267 local_bh_disable();
268
269 HARD_TX_LOCK(dev, txq, smp_processor_id());
270 if (!netif_xmit_frozen_or_drv_stopped(txq))
271 ret = netdev_start_xmit(skb, dev, txq, false);
272 HARD_TX_UNLOCK(dev, txq);
273
274 local_bh_enable();
275
276 if (!dev_xmit_complete(ret))
277 kfree_skb(skb);
278
279 return ret;
280 drop:
281 atomic_long_inc(&dev->tx_dropped);
282 kfree_skb_list(skb);
283 return NET_XMIT_DROP;
284 }
285
286 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
287 {
288 struct net_device *dev;
289
290 rcu_read_lock();
291 dev = rcu_dereference(po->cached_dev);
292 if (likely(dev))
293 dev_hold(dev);
294 rcu_read_unlock();
295
296 return dev;
297 }
298
299 static void packet_cached_dev_assign(struct packet_sock *po,
300 struct net_device *dev)
301 {
302 rcu_assign_pointer(po->cached_dev, dev);
303 }
304
305 static void packet_cached_dev_reset(struct packet_sock *po)
306 {
307 RCU_INIT_POINTER(po->cached_dev, NULL);
308 }
309
310 static bool packet_use_direct_xmit(const struct packet_sock *po)
311 {
312 return po->xmit == packet_direct_xmit;
313 }
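/*
 * Illustrative note (not part of the original file): po->xmit points at
 * packet_direct_xmit() only when userspace has opted out of the qdisc
 * layer.  A hedged sketch, assuming a CAP_NET_RAW process:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 *
 * After this, frames sent on the socket skip traffic shaping and go
 * straight to the driver queue picked by packet_pick_tx_queue() below.
 */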
314
315 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
316 {
317 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
318 }
319
320 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
321 {
322 const struct net_device_ops *ops = dev->netdev_ops;
323 u16 queue_index;
324
325 if (ops->ndo_select_queue) {
326 queue_index = ops->ndo_select_queue(dev, skb, NULL,
327 __packet_pick_tx_queue);
328 queue_index = netdev_cap_txqueue(dev, queue_index);
329 } else {
330 queue_index = __packet_pick_tx_queue(dev, skb);
331 }
332
333 skb_set_queue_mapping(skb, queue_index);
334 }
335
336 /* register_prot_hook must be invoked with the po->bind_lock held,
337 * or from a context in which asynchronous accesses to the packet
338 * socket are not possible (packet_create()).
339 */
340 static void register_prot_hook(struct sock *sk)
341 {
342 struct packet_sock *po = pkt_sk(sk);
343
344 if (!po->running) {
345 if (po->fanout)
346 __fanout_link(sk, po);
347 else
348 dev_add_pack(&po->prot_hook);
349
350 sock_hold(sk);
351 po->running = 1;
352 }
353 }
354
355 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
356 * held. If the sync parameter is true, we will temporarily drop
357 * the po->bind_lock and do a synchronize_net to make sure no
358 * asynchronous packet processing paths still refer to the elements
359 * of po->prot_hook. If the sync parameter is false, it is the
360 * caller's responsibility to take care of this.
361 */
362 static void __unregister_prot_hook(struct sock *sk, bool sync)
363 {
364 struct packet_sock *po = pkt_sk(sk);
365
366 po->running = 0;
367
368 if (po->fanout)
369 __fanout_unlink(sk, po);
370 else
371 __dev_remove_pack(&po->prot_hook);
372
373 __sock_put(sk);
374
375 if (sync) {
376 spin_unlock(&po->bind_lock);
377 synchronize_net();
378 spin_lock(&po->bind_lock);
379 }
380 }
381
382 static void unregister_prot_hook(struct sock *sk, bool sync)
383 {
384 struct packet_sock *po = pkt_sk(sk);
385
386 if (po->running)
387 __unregister_prot_hook(sk, sync);
388 }
389
390 static inline struct page * __pure pgv_to_page(void *addr)
391 {
392 if (is_vmalloc_addr(addr))
393 return vmalloc_to_page(addr);
394 return virt_to_page(addr);
395 }
396
397 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
398 {
399 union tpacket_uhdr h;
400
401 h.raw = frame;
402 switch (po->tp_version) {
403 case TPACKET_V1:
404 h.h1->tp_status = status;
405 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
406 break;
407 case TPACKET_V2:
408 h.h2->tp_status = status;
409 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
410 break;
411 case TPACKET_V3:
412 default:
413 WARN(1, "TPACKET version not supported.\n");
414 BUG();
415 }
416
417 smp_wmb();
418 }
419
420 static int __packet_get_status(struct packet_sock *po, void *frame)
421 {
422 union tpacket_uhdr h;
423
424 smp_rmb();
425
426 h.raw = frame;
427 switch (po->tp_version) {
428 case TPACKET_V1:
429 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
430 return h.h1->tp_status;
431 case TPACKET_V2:
432 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
433 return h.h2->tp_status;
434 case TPACKET_V3:
435 default:
436 WARN(1, "TPACKET version not supported.\n");
437 BUG();
438 return 0;
439 }
440 }
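/*
 * Illustrative userspace sketch (not part of the original file): the
 * tp_status word written/read above is the kernel<->user handshake for
 * TPACKET_V1/V2 rings.  A hedged sketch of the consumer side:
 *
 *	struct tpacket2_hdr *hdr = frame;	// frame in the mmap()ed ring
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&(struct pollfd){ .fd = fd, .events = POLLIN }, 1, -1);
 *
 *	// process hdr->tp_snaplen bytes at (char *)hdr + hdr->tp_mac ...
 *
 *	__sync_synchronize();			// finish reading the frame first
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 */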
441
442 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
443 unsigned int flags)
444 {
445 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
446
447 if (shhwtstamps &&
448 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
449 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
450 return TP_STATUS_TS_RAW_HARDWARE;
451
452 if (ktime_to_timespec_cond(skb->tstamp, ts))
453 return TP_STATUS_TS_SOFTWARE;
454
455 return 0;
456 }
457
458 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
459 struct sk_buff *skb)
460 {
461 union tpacket_uhdr h;
462 struct timespec ts;
463 __u32 ts_status;
464
465 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
466 return 0;
467
468 h.raw = frame;
469 switch (po->tp_version) {
470 case TPACKET_V1:
471 h.h1->tp_sec = ts.tv_sec;
472 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
473 break;
474 case TPACKET_V2:
475 h.h2->tp_sec = ts.tv_sec;
476 h.h2->tp_nsec = ts.tv_nsec;
477 break;
478 case TPACKET_V3:
479 default:
480 WARN(1, "TPACKET version not supported.\n");
481 BUG();
482 }
483
484 /* one flush is safe, as both fields always lie on the same cacheline */
485 flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
486 smp_wmb();
487
488 return ts_status;
489 }
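/*
 * Illustrative note (not part of the original file): po->tp_tstamp is set
 * with the PACKET_TIMESTAMP socket option and takes SOF_TIMESTAMPING_*
 * flags.  A hedged sketch of requesting raw hardware timestamps for the
 * ring (the NIC and driver must support them, otherwise the software
 * timestamp path above is used):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * The result is reported per frame through tp_sec/tp_nsec plus the
 * TP_STATUS_TS_* bits returned by tpacket_get_timestamp() above.
 */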
490
491 static void *packet_lookup_frame(struct packet_sock *po,
492 struct packet_ring_buffer *rb,
493 unsigned int position,
494 int status)
495 {
496 unsigned int pg_vec_pos, frame_offset;
497 union tpacket_uhdr h;
498
499 pg_vec_pos = position / rb->frames_per_block;
500 frame_offset = position % rb->frames_per_block;
501
502 h.raw = rb->pg_vec[pg_vec_pos].buffer +
503 (frame_offset * rb->frame_size);
504
505 if (status != __packet_get_status(po, h.raw))
506 return NULL;
507
508 return h.raw;
509 }
510
511 static void *packet_current_frame(struct packet_sock *po,
512 struct packet_ring_buffer *rb,
513 int status)
514 {
515 return packet_lookup_frame(po, rb, rb->head, status);
516 }
517
518 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
519 {
520 del_timer_sync(&pkc->retire_blk_timer);
521 }
522
523 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
524 struct sk_buff_head *rb_queue)
525 {
526 struct tpacket_kbdq_core *pkc;
527
528 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
529
530 spin_lock_bh(&rb_queue->lock);
531 pkc->delete_blk_timer = 1;
532 spin_unlock_bh(&rb_queue->lock);
533
534 prb_del_retire_blk_timer(pkc);
535 }
536
537 static void prb_init_blk_timer(struct packet_sock *po,
538 struct tpacket_kbdq_core *pkc,
539 void (*func) (unsigned long))
540 {
541 init_timer(&pkc->retire_blk_timer);
542 pkc->retire_blk_timer.data = (long)po;
543 pkc->retire_blk_timer.function = func;
544 pkc->retire_blk_timer.expires = jiffies;
545 }
546
547 static void prb_setup_retire_blk_timer(struct packet_sock *po)
548 {
549 struct tpacket_kbdq_core *pkc;
550
551 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
552 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
553 }
554
555 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
556 int blk_size_in_bytes)
557 {
558 struct net_device *dev;
559 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
560 struct ethtool_link_ksettings ecmd;
561 int err;
562
563 rtnl_lock();
564 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
565 if (unlikely(!dev)) {
566 rtnl_unlock();
567 return DEFAULT_PRB_RETIRE_TOV;
568 }
569 err = __ethtool_get_link_ksettings(dev, &ecmd);
570 rtnl_unlock();
571 if (!err) {
572 /*
573 * If the link speed is so slow that you don't really
574 * need to worry about performance anyway.
575 */
576 if (ecmd.base.speed < SPEED_1000 ||
577 ecmd.base.speed == SPEED_UNKNOWN) {
578 return DEFAULT_PRB_RETIRE_TOV;
579 } else {
580 msec = 1;
581 div = ecmd.base.speed / 1000;
582 }
583 }
584
585 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
586
587 if (div)
588 mbits /= div;
589
590 tmo = mbits * msec;
591
592 if (div)
593 return tmo+1;
594 return tmo;
595 }
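/*
 * Worked example (illustrative, not part of the original file): for a
 * 1 MiB block on a 1 Gb/s link, prb_calc_retire_blk_tmo() computes
 *
 *	mbits = (1048576 * 8) / (1024 * 1024) = 8
 *	div   = 1000 / 1000                   = 1
 *	tmo   = (mbits / div) * msec          = 8 * 1 = 8
 *	return tmo + 1                        = 9 ms
 *
 * i.e. roughly the ~8 ms it takes to fill the block, plus one.  On a
 * 10 Gb/s link div = 10, the integer division collapses mbits to 0 and
 * the function returns 1 ms.
 */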
596
597 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
598 union tpacket_req_u *req_u)
599 {
600 p1->feature_req_word = req_u->req3.tp_feature_req_word;
601 }
602
603 static void init_prb_bdqc(struct packet_sock *po,
604 struct packet_ring_buffer *rb,
605 struct pgv *pg_vec,
606 union tpacket_req_u *req_u)
607 {
608 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
609 struct tpacket_block_desc *pbd;
610
611 memset(p1, 0x0, sizeof(*p1));
612
613 p1->knxt_seq_num = 1;
614 p1->pkbdq = pg_vec;
615 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
616 p1->pkblk_start = pg_vec[0].buffer;
617 p1->kblk_size = req_u->req3.tp_block_size;
618 p1->knum_blocks = req_u->req3.tp_block_nr;
619 p1->hdrlen = po->tp_hdrlen;
620 p1->version = po->tp_version;
621 p1->last_kactive_blk_num = 0;
622 po->stats.stats3.tp_freeze_q_cnt = 0;
623 if (req_u->req3.tp_retire_blk_tov)
624 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
625 else
626 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
627 req_u->req3.tp_block_size);
628 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
629 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
630
631 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
632 prb_init_ft_ops(p1, req_u);
633 prb_setup_retire_blk_timer(po);
634 prb_open_block(p1, pbd);
635 }
636
637 /* Do NOT update the last_blk_num first.
638 * Assumes sk_buff_head lock is held.
639 */
640 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
641 {
642 mod_timer(&pkc->retire_blk_timer,
643 jiffies + pkc->tov_in_jiffies);
644 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
645 }
646
647 /*
648 * Timer logic:
649 * 1) We refresh the timer only when we open a block.
650 * By doing this we don't waste cycles refreshing the timer
651 * on a packet-by-packet basis.
652 *
653 * With a 1MB block-size, on a 1Gbps line, it will take
654 * i) ~8 ms to fill a block + ii) memcpy etc.
655 * In this cut we are not accounting for the memcpy time.
656 *
657 * So, if the user sets the 'tmo' to 10ms then the timer
658 * will never fire while the block is still getting filled
659 * (which is what we want). However, the user could choose
660 * to close a block early and that's fine.
661 *
662 * But when the timer does fire, we check whether or not to refresh it.
663 * Since the tmo granularity is in msecs, it is not too expensive
664 * to refresh the timer, let's say every 8 msecs.
665 * Either the user can set the 'tmo' or we can derive it based on
666 * a) line-speed and b) block-size.
667 * prb_calc_retire_blk_tmo() calculates the tmo.
668 *
669 */
670 static void prb_retire_rx_blk_timer_expired(unsigned long data)
671 {
672 struct packet_sock *po = (struct packet_sock *)data;
673 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
674 unsigned int frozen;
675 struct tpacket_block_desc *pbd;
676
677 spin_lock(&po->sk.sk_receive_queue.lock);
678
679 frozen = prb_queue_frozen(pkc);
680 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
681
682 if (unlikely(pkc->delete_blk_timer))
683 goto out;
684
685 /* We only need to plug the race when the block is partially filled.
686 * tpacket_rcv:
687 * lock(); increment BLOCK_NUM_PKTS; unlock()
688 * copy_bits() is in progress ...
689 * timer fires on other cpu:
690 * we can't retire the current block because copy_bits
691 * is in progress.
692 *
693 */
694 if (BLOCK_NUM_PKTS(pbd)) {
695 while (atomic_read(&pkc->blk_fill_in_prog)) {
696 /* Waiting for skb_copy_bits to finish... */
697 cpu_relax();
698 }
699 }
700
701 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
702 if (!frozen) {
703 if (!BLOCK_NUM_PKTS(pbd)) {
704 /* An empty block. Just refresh the timer. */
705 goto refresh_timer;
706 }
707 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
708 if (!prb_dispatch_next_block(pkc, po))
709 goto refresh_timer;
710 else
711 goto out;
712 } else {
713 /* Case 1. Queue was frozen because user-space was
714 * lagging behind.
715 */
716 if (prb_curr_blk_in_use(pkc, pbd)) {
717 /*
718 * Ok, user-space is still behind.
719 * So just refresh the timer.
720 */
721 goto refresh_timer;
722 } else {
723 /* Case 2. The queue was frozen, user-space caught up,
724 * now the link went idle and the timer fired.
725 * We don't have a block to close, so we open this
726 * block and restart the timer.
727 * Opening a block thaws the queue and restarts the timer;
728 * thawing/timer-refresh is a side effect.
729 */
730 prb_open_block(pkc, pbd);
731 goto out;
732 }
733 }
734 }
735
736 refresh_timer:
737 _prb_refresh_rx_retire_blk_timer(pkc);
738
739 out:
740 spin_unlock(&po->sk.sk_receive_queue.lock);
741 }
742
743 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
744 struct tpacket_block_desc *pbd1, __u32 status)
745 {
746 /* Flush everything minus the block header */
747
748 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
749 u8 *start, *end;
750
751 start = (u8 *)pbd1;
752
753 /* Skip the block header (we know the header WILL fit in 4K) */
754 start += PAGE_SIZE;
755
756 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
757 for (; start < end; start += PAGE_SIZE)
758 flush_dcache_page(pgv_to_page(start));
759
760 smp_wmb();
761 #endif
762
763 /* Now update the block status. */
764
765 BLOCK_STATUS(pbd1) = status;
766
767 /* Flush the block header */
768
769 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
770 start = (u8 *)pbd1;
771 flush_dcache_page(pgv_to_page(start));
772
773 smp_wmb();
774 #endif
775 }
776
777 /*
778 * Side effect:
779 *
780 * 1) flush the block
781 * 2) Increment active_blk_num
782 *
783 * Note: we DON'T refresh the timer on purpose,
784 * because almost always the next block will be opened.
785 */
786 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
787 struct tpacket_block_desc *pbd1,
788 struct packet_sock *po, unsigned int stat)
789 {
790 __u32 status = TP_STATUS_USER | stat;
791
792 struct tpacket3_hdr *last_pkt;
793 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
794 struct sock *sk = &po->sk;
795
796 if (po->stats.stats3.tp_drops)
797 status |= TP_STATUS_LOSING;
798
799 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
800 last_pkt->tp_next_offset = 0;
801
802 /* Get the ts of the last pkt */
803 if (BLOCK_NUM_PKTS(pbd1)) {
804 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
805 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
806 } else {
807 /* Ok, we tmo'd - so get the current time.
808 *
809 * It shouldn't really happen as we don't close empty
810 * blocks. See prb_retire_rx_blk_timer_expired().
811 */
812 struct timespec ts;
813 getnstimeofday(&ts);
814 h1->ts_last_pkt.ts_sec = ts.tv_sec;
815 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
816 }
817
818 smp_wmb();
819
820 /* Flush the block */
821 prb_flush_block(pkc1, pbd1, status);
822
823 sk->sk_data_ready(sk);
824
825 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
826 }
827
828 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
829 {
830 pkc->reset_pending_on_curr_blk = 0;
831 }
832
833 /*
834 * Side effect of opening a block:
835 *
836 * 1) prb_queue is thawed.
837 * 2) retire_blk_timer is refreshed.
838 *
839 */
840 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
841 struct tpacket_block_desc *pbd1)
842 {
843 struct timespec ts;
844 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
845
846 smp_rmb();
847
848 /* We could have just memset this, but we would lose the
849 * flexibility of making the priv area sticky.
850 */
851
852 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
853 BLOCK_NUM_PKTS(pbd1) = 0;
854 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
855
856 getnstimeofday(&ts);
857
858 h1->ts_first_pkt.ts_sec = ts.tv_sec;
859 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
860
861 pkc1->pkblk_start = (char *)pbd1;
862 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
863
864 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
865 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
866
867 pbd1->version = pkc1->version;
868 pkc1->prev = pkc1->nxt_offset;
869 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
870
871 prb_thaw_queue(pkc1);
872 _prb_refresh_rx_retire_blk_timer(pkc1);
873
874 smp_wmb();
875 }
876
877 /*
878 * Queue freeze logic:
879 * 1) Assume tp_block_nr = 8 blocks.
880 * 2) At time 't0', user opens Rx ring.
881 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
882 * 4) user-space is either sleeping or processing block '0'.
883 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
884 * it will close block-7, loop around and try to fill block '0'.
885 * call-flow:
886 * __packet_lookup_frame_in_block
887 * prb_retire_current_block()
888 * prb_dispatch_next_block()
889 * |->(BLOCK_STATUS == USER) evaluates to true
890 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
891 * 6) Now there are two cases:
892 * 6.1) Link goes idle right after the queue is frozen.
893 * But remember, the last open_block() refreshed the timer.
894 * When this timer expires, it will refresh itself so that we can
895 * re-open block-0 in the near future.
896 * 6.2) Link is busy and keeps on receiving packets. This is a simple
897 * case and __packet_lookup_frame_in_block will check if block-0
898 * is free and can now be re-used.
899 */
900 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
901 struct packet_sock *po)
902 {
903 pkc->reset_pending_on_curr_blk = 1;
904 po->stats.stats3.tp_freeze_q_cnt++;
905 }
906
907 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
908
909 /*
910 * If the next block is free then we will dispatch it
911 * and return a good offset.
912 * Else, we will freeze the queue.
913 * So, caller must check the return value.
914 */
915 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
916 struct packet_sock *po)
917 {
918 struct tpacket_block_desc *pbd;
919
920 smp_rmb();
921
922 /* 1. Get current block num */
923 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
924
925 /* 2. If this block is currently in_use then freeze the queue */
926 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
927 prb_freeze_queue(pkc, po);
928 return NULL;
929 }
930
931 /*
932 * 3.
933 * open this block and return the offset where the first packet
934 * needs to get stored.
935 */
936 prb_open_block(pkc, pbd);
937 return (void *)pkc->nxt_offset;
938 }
939
940 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
941 struct packet_sock *po, unsigned int status)
942 {
943 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
944
945 /* retire/close the current block */
946 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
947 /*
948 * Plug the case where copy_bits() is in progress on
949 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
950 * have space to copy the pkt in the current block and
951 * called prb_retire_current_block()
952 *
953 * We don't need to worry about the TMO case because
954 * the timer-handler already handled this case.
955 */
956 if (!(status & TP_STATUS_BLK_TMO)) {
957 while (atomic_read(&pkc->blk_fill_in_prog)) {
958 /* Waiting for skb_copy_bits to finish... */
959 cpu_relax();
960 }
961 }
962 prb_close_block(pkc, pbd, po, status);
963 return;
964 }
965 }
966
967 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
968 struct tpacket_block_desc *pbd)
969 {
970 return TP_STATUS_USER & BLOCK_STATUS(pbd);
971 }
972
973 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
974 {
975 return pkc->reset_pending_on_curr_blk;
976 }
977
978 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
979 {
980 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
981 atomic_dec(&pkc->blk_fill_in_prog);
982 }
983
984 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
985 struct tpacket3_hdr *ppd)
986 {
987 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
988 }
989
990 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
991 struct tpacket3_hdr *ppd)
992 {
993 ppd->hv1.tp_rxhash = 0;
994 }
995
996 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
997 struct tpacket3_hdr *ppd)
998 {
999 if (skb_vlan_tag_present(pkc->skb)) {
1000 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1001 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1002 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1003 } else {
1004 ppd->hv1.tp_vlan_tci = 0;
1005 ppd->hv1.tp_vlan_tpid = 0;
1006 ppd->tp_status = TP_STATUS_AVAILABLE;
1007 }
1008 }
1009
1010 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1011 struct tpacket3_hdr *ppd)
1012 {
1013 ppd->hv1.tp_padding = 0;
1014 prb_fill_vlan_info(pkc, ppd);
1015
1016 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1017 prb_fill_rxhash(pkc, ppd);
1018 else
1019 prb_clear_rxhash(pkc, ppd);
1020 }
1021
1022 static void prb_fill_curr_block(char *curr,
1023 struct tpacket_kbdq_core *pkc,
1024 struct tpacket_block_desc *pbd,
1025 unsigned int len)
1026 {
1027 struct tpacket3_hdr *ppd;
1028
1029 ppd = (struct tpacket3_hdr *)curr;
1030 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1031 pkc->prev = curr;
1032 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1033 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1034 BLOCK_NUM_PKTS(pbd) += 1;
1035 atomic_inc(&pkc->blk_fill_in_prog);
1036 prb_run_all_ft_ops(pkc, ppd);
1037 }
1038
1039 /* Assumes caller has the sk->rx_queue.lock */
1040 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1041 struct sk_buff *skb,
1042 int status,
1043 unsigned int len
1044 )
1045 {
1046 struct tpacket_kbdq_core *pkc;
1047 struct tpacket_block_desc *pbd;
1048 char *curr, *end;
1049
1050 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1051 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1052
1053 /* Queue is frozen when user space is lagging behind */
1054 if (prb_queue_frozen(pkc)) {
1055 /*
1056 * Check if the last block, which caused the queue to freeze,
1057 * is still in use by user-space.
1058 */
1059 if (prb_curr_blk_in_use(pkc, pbd)) {
1060 /* Can't record this packet */
1061 return NULL;
1062 } else {
1063 /*
1064 * Ok, the block was released by user-space.
1065 * Now let's open that block.
1066 * opening a block also thaws the queue.
1067 * Thawing is a side effect.
1068 */
1069 prb_open_block(pkc, pbd);
1070 }
1071 }
1072
1073 smp_mb();
1074 curr = pkc->nxt_offset;
1075 pkc->skb = skb;
1076 end = (char *)pbd + pkc->kblk_size;
1077
1078 /* first try the current block */
1079 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1080 prb_fill_curr_block(curr, pkc, pbd, len);
1081 return (void *)curr;
1082 }
1083
1084 /* Ok, close the current block */
1085 prb_retire_current_block(pkc, po, 0);
1086
1087 /* Now, try to dispatch the next block */
1088 curr = (char *)prb_dispatch_next_block(pkc, po);
1089 if (curr) {
1090 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1091 prb_fill_curr_block(curr, pkc, pbd, len);
1092 return (void *)curr;
1093 }
1094
1095 /*
1096 * No free blocks are available. user-space hasn't caught up yet.
1097 * The queue was just frozen and now this packet will get dropped.
1098 */
1099 return NULL;
1100 }
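/*
 * Illustrative userspace sketch (not part of the original file): the
 * freeze/thaw dance above only resolves once the consumer hands blocks
 * back.  A hedged sketch of the matching TPACKET_V3 consumer loop:
 *
 *	struct tpacket_block_desc *bd = block;	// block in the mmap()ed ring
 *
 *	while (!(bd->hdr.bh1.block_status & TP_STATUS_USER))
 *		poll(&(struct pollfd){ .fd = fd, .events = POLLIN }, 1, -1);
 *
 *	struct tpacket3_hdr *ppd =
 *		(void *)((char *)bd + bd->hdr.bh1.offset_to_first_pkt);
 *	for (unsigned int i = 0; i < bd->hdr.bh1.num_pkts; i++) {
 *		// ppd->tp_snaplen bytes at (char *)ppd + ppd->tp_mac ...
 *		ppd = (void *)((char *)ppd + ppd->tp_next_offset);
 *	}
 *
 *	__sync_synchronize();				// finish reading first
 *	bd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// release the block
 *
 * Releasing the block is what lets prb_open_block()/prb_thaw_queue()
 * re-use it once the retire timer or the next packet comes along.
 */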
1101
1102 static void *packet_current_rx_frame(struct packet_sock *po,
1103 struct sk_buff *skb,
1104 int status, unsigned int len)
1105 {
1106 char *curr = NULL;
1107 switch (po->tp_version) {
1108 case TPACKET_V1:
1109 case TPACKET_V2:
1110 curr = packet_lookup_frame(po, &po->rx_ring,
1111 po->rx_ring.head, status);
1112 return curr;
1113 case TPACKET_V3:
1114 return __packet_lookup_frame_in_block(po, skb, status, len);
1115 default:
1116 WARN(1, "TPACKET version not supported\n");
1117 BUG();
1118 return NULL;
1119 }
1120 }
1121
1122 static void *prb_lookup_block(struct packet_sock *po,
1123 struct packet_ring_buffer *rb,
1124 unsigned int idx,
1125 int status)
1126 {
1127 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1128 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1129
1130 if (status != BLOCK_STATUS(pbd))
1131 return NULL;
1132 return pbd;
1133 }
1134
1135 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1136 {
1137 unsigned int prev;
1138 if (rb->prb_bdqc.kactive_blk_num)
1139 prev = rb->prb_bdqc.kactive_blk_num-1;
1140 else
1141 prev = rb->prb_bdqc.knum_blocks-1;
1142 return prev;
1143 }
1144
1145 /* Assumes caller has held the rx_queue.lock */
1146 static void *__prb_previous_block(struct packet_sock *po,
1147 struct packet_ring_buffer *rb,
1148 int status)
1149 {
1150 unsigned int previous = prb_previous_blk_num(rb);
1151 return prb_lookup_block(po, rb, previous, status);
1152 }
1153
1154 static void *packet_previous_rx_frame(struct packet_sock *po,
1155 struct packet_ring_buffer *rb,
1156 int status)
1157 {
1158 if (po->tp_version <= TPACKET_V2)
1159 return packet_previous_frame(po, rb, status);
1160
1161 return __prb_previous_block(po, rb, status);
1162 }
1163
1164 static void packet_increment_rx_head(struct packet_sock *po,
1165 struct packet_ring_buffer *rb)
1166 {
1167 switch (po->tp_version) {
1168 case TPACKET_V1:
1169 case TPACKET_V2:
1170 return packet_increment_head(rb);
1171 case TPACKET_V3:
1172 default:
1173 WARN(1, "TPACKET version not supported.\n");
1174 BUG();
1175 return;
1176 }
1177 }
1178
1179 static void *packet_previous_frame(struct packet_sock *po,
1180 struct packet_ring_buffer *rb,
1181 int status)
1182 {
1183 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1184 return packet_lookup_frame(po, rb, previous, status);
1185 }
1186
1187 static void packet_increment_head(struct packet_ring_buffer *buff)
1188 {
1189 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1190 }
1191
1192 static void packet_inc_pending(struct packet_ring_buffer *rb)
1193 {
1194 this_cpu_inc(*rb->pending_refcnt);
1195 }
1196
1197 static void packet_dec_pending(struct packet_ring_buffer *rb)
1198 {
1199 this_cpu_dec(*rb->pending_refcnt);
1200 }
1201
1202 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1203 {
1204 unsigned int refcnt = 0;
1205 int cpu;
1206
1207 /* We don't use pending refcount in rx_ring. */
1208 if (rb->pending_refcnt == NULL)
1209 return 0;
1210
1211 for_each_possible_cpu(cpu)
1212 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1213
1214 return refcnt;
1215 }
1216
1217 static int packet_alloc_pending(struct packet_sock *po)
1218 {
1219 po->rx_ring.pending_refcnt = NULL;
1220
1221 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1222 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1223 return -ENOBUFS;
1224
1225 return 0;
1226 }
1227
1228 static void packet_free_pending(struct packet_sock *po)
1229 {
1230 free_percpu(po->tx_ring.pending_refcnt);
1231 }
1232
1233 #define ROOM_POW_OFF 2
1234 #define ROOM_NONE 0x0
1235 #define ROOM_LOW 0x1
1236 #define ROOM_NORMAL 0x2
1237
1238 static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
1239 {
1240 int idx, len;
1241
1242 len = po->rx_ring.frame_max + 1;
1243 idx = po->rx_ring.head;
1244 if (pow_off)
1245 idx += len >> pow_off;
1246 if (idx >= len)
1247 idx -= len;
1248 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1249 }
1250
1251 static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
1252 {
1253 int idx, len;
1254
1255 len = po->rx_ring.prb_bdqc.knum_blocks;
1256 idx = po->rx_ring.prb_bdqc.kactive_blk_num;
1257 if (pow_off)
1258 idx += len >> pow_off;
1259 if (idx >= len)
1260 idx -= len;
1261 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1262 }
1263
1264 static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1265 {
1266 struct sock *sk = &po->sk;
1267 int ret = ROOM_NONE;
1268
1269 if (po->prot_hook.func != tpacket_rcv) {
1270 int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1271 - (skb ? skb->truesize : 0);
1272 if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
1273 return ROOM_NORMAL;
1274 else if (avail > 0)
1275 return ROOM_LOW;
1276 else
1277 return ROOM_NONE;
1278 }
1279
1280 if (po->tp_version == TPACKET_V3) {
1281 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1282 ret = ROOM_NORMAL;
1283 else if (__tpacket_v3_has_room(po, 0))
1284 ret = ROOM_LOW;
1285 } else {
1286 if (__tpacket_has_room(po, ROOM_POW_OFF))
1287 ret = ROOM_NORMAL;
1288 else if (__tpacket_has_room(po, 0))
1289 ret = ROOM_LOW;
1290 }
1291
1292 return ret;
1293 }
1294
1295 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1296 {
1297 int ret;
1298 bool has_room;
1299
1300 spin_lock_bh(&po->sk.sk_receive_queue.lock);
1301 ret = __packet_rcv_has_room(po, skb);
1302 has_room = ret == ROOM_NORMAL;
1303 if (po->pressure == has_room)
1304 po->pressure = !has_room;
1305 spin_unlock_bh(&po->sk.sk_receive_queue.lock);
1306
1307 return ret;
1308 }
1309
1310 static void packet_sock_destruct(struct sock *sk)
1311 {
1312 skb_queue_purge(&sk->sk_error_queue);
1313
1314 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1315 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1316
1317 if (!sock_flag(sk, SOCK_DEAD)) {
1318 pr_err("Attempt to release alive packet socket: %p\n", sk);
1319 return;
1320 }
1321
1322 sk_refcnt_debug_dec(sk);
1323 }
1324
1325 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1326 {
1327 u32 rxhash;
1328 int i, count = 0;
1329
1330 rxhash = skb_get_hash(skb);
1331 for (i = 0; i < ROLLOVER_HLEN; i++)
1332 if (po->rollover->history[i] == rxhash)
1333 count++;
1334
1335 po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1336 return count > (ROLLOVER_HLEN >> 1);
1337 }
1338
1339 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1340 struct sk_buff *skb,
1341 unsigned int num)
1342 {
1343 return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1344 }
1345
1346 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1347 struct sk_buff *skb,
1348 unsigned int num)
1349 {
1350 unsigned int val = atomic_inc_return(&f->rr_cur);
1351
1352 return val % num;
1353 }
1354
1355 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1356 struct sk_buff *skb,
1357 unsigned int num)
1358 {
1359 return smp_processor_id() % num;
1360 }
1361
1362 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1363 struct sk_buff *skb,
1364 unsigned int num)
1365 {
1366 return prandom_u32_max(num);
1367 }
1368
1369 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1370 struct sk_buff *skb,
1371 unsigned int idx, bool try_self,
1372 unsigned int num)
1373 {
1374 struct packet_sock *po, *po_next, *po_skip = NULL;
1375 unsigned int i, j, room = ROOM_NONE;
1376
1377 po = pkt_sk(f->arr[idx]);
1378
1379 if (try_self) {
1380 room = packet_rcv_has_room(po, skb);
1381 if (room == ROOM_NORMAL ||
1382 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1383 return idx;
1384 po_skip = po;
1385 }
1386
1387 i = j = min_t(int, po->rollover->sock, num - 1);
1388 do {
1389 po_next = pkt_sk(f->arr[i]);
1390 if (po_next != po_skip && !po_next->pressure &&
1391 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1392 if (i != j)
1393 po->rollover->sock = i;
1394 atomic_long_inc(&po->rollover->num);
1395 if (room == ROOM_LOW)
1396 atomic_long_inc(&po->rollover->num_huge);
1397 return i;
1398 }
1399
1400 if (++i == num)
1401 i = 0;
1402 } while (i != j);
1403
1404 atomic_long_inc(&po->rollover->num_failed);
1405 return idx;
1406 }
1407
1408 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1409 struct sk_buff *skb,
1410 unsigned int num)
1411 {
1412 return skb_get_queue_mapping(skb) % num;
1413 }
1414
1415 static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1416 struct sk_buff *skb,
1417 unsigned int num)
1418 {
1419 struct bpf_prog *prog;
1420 unsigned int ret = 0;
1421
1422 rcu_read_lock();
1423 prog = rcu_dereference(f->bpf_prog);
1424 if (prog)
1425 ret = bpf_prog_run_clear_cb(prog, skb) % num;
1426 rcu_read_unlock();
1427
1428 return ret;
1429 }
1430
1431 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1432 {
1433 return f->flags & (flag >> 8);
1434 }
1435
1436 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1437 struct packet_type *pt, struct net_device *orig_dev)
1438 {
1439 struct packet_fanout *f = pt->af_packet_priv;
1440 unsigned int num = READ_ONCE(f->num_members);
1441 struct net *net = read_pnet(&f->net);
1442 struct packet_sock *po;
1443 unsigned int idx;
1444
1445 if (!net_eq(dev_net(dev), net) || !num) {
1446 kfree_skb(skb);
1447 return 0;
1448 }
1449
1450 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1451 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1452 if (!skb)
1453 return 0;
1454 }
1455 switch (f->type) {
1456 case PACKET_FANOUT_HASH:
1457 default:
1458 idx = fanout_demux_hash(f, skb, num);
1459 break;
1460 case PACKET_FANOUT_LB:
1461 idx = fanout_demux_lb(f, skb, num);
1462 break;
1463 case PACKET_FANOUT_CPU:
1464 idx = fanout_demux_cpu(f, skb, num);
1465 break;
1466 case PACKET_FANOUT_RND:
1467 idx = fanout_demux_rnd(f, skb, num);
1468 break;
1469 case PACKET_FANOUT_QM:
1470 idx = fanout_demux_qm(f, skb, num);
1471 break;
1472 case PACKET_FANOUT_ROLLOVER:
1473 idx = fanout_demux_rollover(f, skb, 0, false, num);
1474 break;
1475 case PACKET_FANOUT_CBPF:
1476 case PACKET_FANOUT_EBPF:
1477 idx = fanout_demux_bpf(f, skb, num);
1478 break;
1479 }
1480
1481 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1482 idx = fanout_demux_rollover(f, skb, idx, true, num);
1483
1484 po = pkt_sk(f->arr[idx]);
1485 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1486 }
1487
1488 DEFINE_MUTEX(fanout_mutex);
1489 EXPORT_SYMBOL_GPL(fanout_mutex);
1490 static LIST_HEAD(fanout_list);
1491
1492 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1493 {
1494 struct packet_fanout *f = po->fanout;
1495
1496 spin_lock(&f->lock);
1497 f->arr[f->num_members] = sk;
1498 smp_wmb();
1499 f->num_members++;
1500 spin_unlock(&f->lock);
1501 }
1502
1503 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1504 {
1505 struct packet_fanout *f = po->fanout;
1506 int i;
1507
1508 spin_lock(&f->lock);
1509 for (i = 0; i < f->num_members; i++) {
1510 if (f->arr[i] == sk)
1511 break;
1512 }
1513 BUG_ON(i >= f->num_members);
1514 f->arr[i] = f->arr[f->num_members - 1];
1515 f->num_members--;
1516 spin_unlock(&f->lock);
1517 }
1518
1519 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1520 {
1521 if (sk->sk_family != PF_PACKET)
1522 return false;
1523
1524 return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1525 }
1526
1527 static void fanout_init_data(struct packet_fanout *f)
1528 {
1529 switch (f->type) {
1530 case PACKET_FANOUT_LB:
1531 atomic_set(&f->rr_cur, 0);
1532 break;
1533 case PACKET_FANOUT_CBPF:
1534 case PACKET_FANOUT_EBPF:
1535 RCU_INIT_POINTER(f->bpf_prog, NULL);
1536 break;
1537 }
1538 }
1539
1540 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1541 {
1542 struct bpf_prog *old;
1543
1544 spin_lock(&f->lock);
1545 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1546 rcu_assign_pointer(f->bpf_prog, new);
1547 spin_unlock(&f->lock);
1548
1549 if (old) {
1550 synchronize_net();
1551 bpf_prog_destroy(old);
1552 }
1553 }
1554
1555 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1556 unsigned int len)
1557 {
1558 struct bpf_prog *new;
1559 struct sock_fprog fprog;
1560 int ret;
1561
1562 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1563 return -EPERM;
1564 if (len != sizeof(fprog))
1565 return -EINVAL;
1566 if (copy_from_user(&fprog, data, len))
1567 return -EFAULT;
1568
1569 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1570 if (ret)
1571 return ret;
1572
1573 __fanout_set_data_bpf(po->fanout, new);
1574 return 0;
1575 }
1576
1577 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1578 unsigned int len)
1579 {
1580 struct bpf_prog *new;
1581 u32 fd;
1582
1583 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1584 return -EPERM;
1585 if (len != sizeof(fd))
1586 return -EINVAL;
1587 if (copy_from_user(&fd, data, len))
1588 return -EFAULT;
1589
1590 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1591 if (IS_ERR(new))
1592 return PTR_ERR(new);
1593
1594 __fanout_set_data_bpf(po->fanout, new);
1595 return 0;
1596 }
1597
1598 static int fanout_set_data(struct packet_sock *po, char __user *data,
1599 unsigned int len)
1600 {
1601 switch (po->fanout->type) {
1602 case PACKET_FANOUT_CBPF:
1603 return fanout_set_data_cbpf(po, data, len);
1604 case PACKET_FANOUT_EBPF:
1605 return fanout_set_data_ebpf(po, data, len);
1606 default:
1607 return -EINVAL;
1608 };
1609 }
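/*
 * Illustrative userspace sketch (not part of the original file): the
 * CBPF/EBPF fanout modes are programmed through PACKET_FANOUT_DATA after
 * the group has been joined.  A hedged sketch for the classic-BPF case,
 * using a trivial one-instruction program:
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 0x00000000 },	// BPF_RET|BPF_K: return 0 -> member 0
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &fprog, sizeof(fprog));
 *
 * For PACKET_FANOUT_EBPF the same option takes a 32-bit eBPF program fd
 * obtained from bpf(BPF_PROG_LOAD, ...) instead of a sock_fprog.
 */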
1610
1611 static void fanout_release_data(struct packet_fanout *f)
1612 {
1613 switch (f->type) {
1614 case PACKET_FANOUT_CBPF:
1615 case PACKET_FANOUT_EBPF:
1616 __fanout_set_data_bpf(f, NULL);
1617 };
1618 }
1619
1620 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1621 {
1622 struct packet_sock *po = pkt_sk(sk);
1623 struct packet_fanout *f, *match;
1624 u8 type = type_flags & 0xff;
1625 u8 flags = type_flags >> 8;
1626 int err;
1627
1628 switch (type) {
1629 case PACKET_FANOUT_ROLLOVER:
1630 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1631 return -EINVAL;
1632 case PACKET_FANOUT_HASH:
1633 case PACKET_FANOUT_LB:
1634 case PACKET_FANOUT_CPU:
1635 case PACKET_FANOUT_RND:
1636 case PACKET_FANOUT_QM:
1637 case PACKET_FANOUT_CBPF:
1638 case PACKET_FANOUT_EBPF:
1639 break;
1640 default:
1641 return -EINVAL;
1642 }
1643
1644 if (!po->running)
1645 return -EINVAL;
1646
1647 if (po->fanout)
1648 return -EALREADY;
1649
1650 if (type == PACKET_FANOUT_ROLLOVER ||
1651 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1652 po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
1653 if (!po->rollover)
1654 return -ENOMEM;
1655 atomic_long_set(&po->rollover->num, 0);
1656 atomic_long_set(&po->rollover->num_huge, 0);
1657 atomic_long_set(&po->rollover->num_failed, 0);
1658 }
1659
1660 mutex_lock(&fanout_mutex);
1661 match = NULL;
1662 list_for_each_entry(f, &fanout_list, list) {
1663 if (f->id == id &&
1664 read_pnet(&f->net) == sock_net(sk)) {
1665 match = f;
1666 break;
1667 }
1668 }
1669 err = -EINVAL;
1670 if (match && match->flags != flags)
1671 goto out;
1672 if (!match) {
1673 err = -ENOMEM;
1674 match = kzalloc(sizeof(*match), GFP_KERNEL);
1675 if (!match)
1676 goto out;
1677 write_pnet(&match->net, sock_net(sk));
1678 match->id = id;
1679 match->type = type;
1680 match->flags = flags;
1681 INIT_LIST_HEAD(&match->list);
1682 spin_lock_init(&match->lock);
1683 atomic_set(&match->sk_ref, 0);
1684 fanout_init_data(match);
1685 match->prot_hook.type = po->prot_hook.type;
1686 match->prot_hook.dev = po->prot_hook.dev;
1687 match->prot_hook.func = packet_rcv_fanout;
1688 match->prot_hook.af_packet_priv = match;
1689 match->prot_hook.id_match = match_fanout_group;
1690 dev_add_pack(&match->prot_hook);
1691 list_add(&match->list, &fanout_list);
1692 }
1693 err = -EINVAL;
1694 if (match->type == type &&
1695 match->prot_hook.type == po->prot_hook.type &&
1696 match->prot_hook.dev == po->prot_hook.dev) {
1697 err = -ENOSPC;
1698 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1699 __dev_remove_pack(&po->prot_hook);
1700 po->fanout = match;
1701 atomic_inc(&match->sk_ref);
1702 __fanout_link(sk, po);
1703 err = 0;
1704 }
1705 }
1706 out:
1707 mutex_unlock(&fanout_mutex);
1708 if (err) {
1709 kfree(po->rollover);
1710 po->rollover = NULL;
1711 }
1712 return err;
1713 }
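/*
 * Illustrative userspace sketch (not part of the original file): fanout_add()
 * is reached through the PACKET_FANOUT socket option.  The 32-bit argument
 * packs the group id in the low 16 bits and the type/flags in the high 16
 * bits.  A hedged sketch joining a hash-based group with rollover enabled:
 *
 *	int fanout = 42 | ((PACKET_FANOUT_HASH |
 *			    PACKET_FANOUT_FLAG_ROLLOVER) << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &fanout, sizeof(fanout));
 *
 * The socket must already be bound/running (see the -EINVAL check above),
 * and every member of group 42 must use the same flags.
 */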
1714
1715 static void fanout_release(struct sock *sk)
1716 {
1717 struct packet_sock *po = pkt_sk(sk);
1718 struct packet_fanout *f;
1719
1720 f = po->fanout;
1721 if (!f)
1722 return;
1723
1724 mutex_lock(&fanout_mutex);
1725 po->fanout = NULL;
1726
1727 if (atomic_dec_and_test(&f->sk_ref)) {
1728 list_del(&f->list);
1729 dev_remove_pack(&f->prot_hook);
1730 fanout_release_data(f);
1731 kfree(f);
1732 }
1733 mutex_unlock(&fanout_mutex);
1734
1735 if (po->rollover)
1736 kfree_rcu(po->rollover, rcu);
1737 }
1738
1739 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1740 struct sk_buff *skb)
1741 {
1742 /* Earlier code assumed this would be a VLAN pkt, double-check
1743 * this now that we have the actual packet in hand. We can only
1744 * do this check on Ethernet devices.
1745 */
1746 if (unlikely(dev->type != ARPHRD_ETHER))
1747 return false;
1748
1749 skb_reset_mac_header(skb);
1750 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1751 }
1752
1753 static const struct proto_ops packet_ops;
1754
1755 static const struct proto_ops packet_ops_spkt;
1756
1757 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1758 struct packet_type *pt, struct net_device *orig_dev)
1759 {
1760 struct sock *sk;
1761 struct sockaddr_pkt *spkt;
1762
1763 /*
1764 * When we registered the protocol we saved the socket in the data
1765 * field for just this event.
1766 */
1767
1768 sk = pt->af_packet_priv;
1769
1770 /*
1771 * Yank back the headers [hope the device set this
1772 * right or kerboom...]
1773 *
1774 * Incoming packets have ll header pulled,
1775 * push it back.
1776 *
1777 * For outgoing ones skb->data == skb_mac_header(skb)
1778 * so that this procedure is a no-op.
1779 */
1780
1781 if (skb->pkt_type == PACKET_LOOPBACK)
1782 goto out;
1783
1784 if (!net_eq(dev_net(dev), sock_net(sk)))
1785 goto out;
1786
1787 skb = skb_share_check(skb, GFP_ATOMIC);
1788 if (skb == NULL)
1789 goto oom;
1790
1791 /* drop any routing info */
1792 skb_dst_drop(skb);
1793
1794 /* drop conntrack reference */
1795 nf_reset(skb);
1796
1797 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1798
1799 skb_push(skb, skb->data - skb_mac_header(skb));
1800
1801 /*
1802 * The SOCK_PACKET socket receives _all_ frames.
1803 */
1804
1805 spkt->spkt_family = dev->type;
1806 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1807 spkt->spkt_protocol = skb->protocol;
1808
1809 /*
1810 * Charge the memory to the socket. This is done specifically
1811 * to prevent sockets from using up all the memory.
1812 */
1813
1814 if (sock_queue_rcv_skb(sk, skb) == 0)
1815 return 0;
1816
1817 out:
1818 kfree_skb(skb);
1819 oom:
1820 return 0;
1821 }
1822
1823
1824 /*
1825 * Output a raw packet to a device layer. This bypasses all the other
1826 * protocol layers and you must therefore supply it with a complete frame
1827 */
1828
1829 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1830 size_t len)
1831 {
1832 struct sock *sk = sock->sk;
1833 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1834 struct sk_buff *skb = NULL;
1835 struct net_device *dev;
1836 struct sockcm_cookie sockc;
1837 __be16 proto = 0;
1838 int err;
1839 int extra_len = 0;
1840
1841 /*
1842 * Get and verify the address.
1843 */
1844
1845 if (saddr) {
1846 if (msg->msg_namelen < sizeof(struct sockaddr))
1847 return -EINVAL;
1848 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1849 proto = saddr->spkt_protocol;
1850 } else
1851 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1852
1853 /*
1854 * Find the device first to size check it
1855 */
1856
1857 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1858 retry:
1859 rcu_read_lock();
1860 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1861 err = -ENODEV;
1862 if (dev == NULL)
1863 goto out_unlock;
1864
1865 err = -ENETDOWN;
1866 if (!(dev->flags & IFF_UP))
1867 goto out_unlock;
1868
1869 /*
1870 * You may not queue a frame bigger than the mtu. This is the lowest level
1871 * raw protocol and you must do your own fragmentation at this level.
1872 */
1873
1874 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1875 if (!netif_supports_nofcs(dev)) {
1876 err = -EPROTONOSUPPORT;
1877 goto out_unlock;
1878 }
1879 extra_len = 4; /* We're doing our own CRC */
1880 }
1881
1882 err = -EMSGSIZE;
1883 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1884 goto out_unlock;
1885
1886 if (!skb) {
1887 size_t reserved = LL_RESERVED_SPACE(dev);
1888 int tlen = dev->needed_tailroom;
1889 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1890
1891 rcu_read_unlock();
1892 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1893 if (skb == NULL)
1894 return -ENOBUFS;
1895 /* FIXME: Save some space for broken drivers that write a hard
1896 * header at transmission time by themselves. PPP is the notable
1897 * one here. This should really be fixed at the driver level.
1898 */
1899 skb_reserve(skb, reserved);
1900 skb_reset_network_header(skb);
1901
1902 /* Try to align data part correctly */
1903 if (hhlen) {
1904 skb->data -= hhlen;
1905 skb->tail -= hhlen;
1906 if (len < hhlen)
1907 skb_reset_network_header(skb);
1908 }
1909 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1910 if (err)
1911 goto out_free;
1912 goto retry;
1913 }
1914
1915 if (!dev_validate_header(dev, skb->data, len)) {
1916 err = -EINVAL;
1917 goto out_unlock;
1918 }
1919 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1920 !packet_extra_vlan_len_allowed(dev, skb)) {
1921 err = -EMSGSIZE;
1922 goto out_unlock;
1923 }
1924
1925 sockc.tsflags = sk->sk_tsflags;
1926 if (msg->msg_controllen) {
1927 err = sock_cmsg_send(sk, msg, &sockc);
1928 if (unlikely(err))
1929 goto out_unlock;
1930 }
1931
1932 skb->protocol = proto;
1933 skb->dev = dev;
1934 skb->priority = sk->sk_priority;
1935 skb->mark = sk->sk_mark;
1936
1937 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
1938
1939 if (unlikely(extra_len == 4))
1940 skb->no_fcs = 1;
1941
1942 skb_probe_transport_header(skb, 0);
1943
1944 dev_queue_xmit(skb);
1945 rcu_read_unlock();
1946 return len;
1947
1948 out_unlock:
1949 rcu_read_unlock();
1950 out_free:
1951 kfree_skb(skb);
1952 return err;
1953 }
1954
1955 static unsigned int run_filter(struct sk_buff *skb,
1956 const struct sock *sk,
1957 unsigned int res)
1958 {
1959 struct sk_filter *filter;
1960
1961 rcu_read_lock();
1962 filter = rcu_dereference(sk->sk_filter);
1963 if (filter != NULL)
1964 res = bpf_prog_run_clear_cb(filter->prog, skb);
1965 rcu_read_unlock();
1966
1967 return res;
1968 }
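/*
 * Illustrative userspace sketch (not part of the original file): the
 * sk_filter consulted above is attached by the application with
 * SO_ATTACH_FILTER.  A hedged sketch with a classic-BPF program that
 * accepts only ARP frames (as generated by "tcpdump -dd arp"):
 *
 *	struct sock_filter code[] = {
 *		{ 0x28, 0, 0, 0x0000000c },	// ldh [12]          (ethertype)
 *		{ 0x15, 0, 1, 0x00000806 },	// jeq #ETH_P_ARP, L1, L2
 *		{ 0x06, 0, 0, 0x00040000 },	// L1: ret #262144   (keep frame)
 *		{ 0x06, 0, 0, 0x00000000 },	// L2: ret #0        (drop)
 *	};
 *	struct sock_fprog fprog = { .len = 4, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * run_filter()'s return value then bounds snaplen in packet_rcv() and
 * tpacket_rcv(); returning 0 drops the frame before it is queued.
 */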
1969
1970 static int __packet_rcv_vnet(const struct sk_buff *skb,
1971 struct virtio_net_hdr *vnet_hdr)
1972 {
1973 *vnet_hdr = (const struct virtio_net_hdr) { 0 };
1974
1975 if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le()))
1976 BUG();
1977
1978 return 0;
1979 }
1980
1981 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
1982 size_t *len)
1983 {
1984 struct virtio_net_hdr vnet_hdr;
1985
1986 if (*len < sizeof(vnet_hdr))
1987 return -EINVAL;
1988 *len -= sizeof(vnet_hdr);
1989
1990 if (__packet_rcv_vnet(skb, &vnet_hdr))
1991 return -EINVAL;
1992
1993 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
1994 }
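/*
 * Illustrative note (not part of the original file): the virtio_net_hdr
 * copied to userspace here is only produced when the socket enabled it.
 * A hedged sketch:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *
 * After this, every recvmsg() on the socket is prefixed with a
 * struct virtio_net_hdr describing GSO/checksum state, and sendmsg()
 * is expected to provide one in front of the frame, which is how
 * userspace hypervisors keep offloads intact across a packet socket.
 */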
1995
1996 /*
1997 * This function does lazy skb cloning in the hope that most packets
1998 * are discarded by BPF.
1999 *
2000 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2001 * and skb->cb are mangled. It works because (and until) packets
2002 * falling here are owned by current CPU. Output packets are cloned
2003 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2004 * sequentially, so that if we return the skb to its original state on exit,
2005 * we will not harm anyone.
2006 */
2007
2008 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2009 struct packet_type *pt, struct net_device *orig_dev)
2010 {
2011 struct sock *sk;
2012 struct sockaddr_ll *sll;
2013 struct packet_sock *po;
2014 u8 *skb_head = skb->data;
2015 int skb_len = skb->len;
2016 unsigned int snaplen, res;
2017 bool is_drop_n_account = false;
2018
2019 if (skb->pkt_type == PACKET_LOOPBACK)
2020 goto drop;
2021
2022 sk = pt->af_packet_priv;
2023 po = pkt_sk(sk);
2024
2025 if (!net_eq(dev_net(dev), sock_net(sk)))
2026 goto drop;
2027
2028 skb->dev = dev;
2029
2030 if (dev->header_ops) {
2031 /* The device has an explicit notion of ll header,
2032 * exported to higher levels.
2033 *
2034 * Otherwise, the device hides details of its frame
2035 * structure, so that the corresponding packet head is
2036 * never delivered to the user.
2037 */
2038 if (sk->sk_type != SOCK_DGRAM)
2039 skb_push(skb, skb->data - skb_mac_header(skb));
2040 else if (skb->pkt_type == PACKET_OUTGOING) {
2041 /* Special case: outgoing packets have ll header at head */
2042 skb_pull(skb, skb_network_offset(skb));
2043 }
2044 }
2045
2046 snaplen = skb->len;
2047
2048 res = run_filter(skb, sk, snaplen);
2049 if (!res)
2050 goto drop_n_restore;
2051 if (snaplen > res)
2052 snaplen = res;
2053
2054 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2055 goto drop_n_acct;
2056
2057 if (skb_shared(skb)) {
2058 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2059 if (nskb == NULL)
2060 goto drop_n_acct;
2061
2062 if (skb_head != skb->data) {
2063 skb->data = skb_head;
2064 skb->len = skb_len;
2065 }
2066 consume_skb(skb);
2067 skb = nskb;
2068 }
2069
2070 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2071
2072 sll = &PACKET_SKB_CB(skb)->sa.ll;
2073 sll->sll_hatype = dev->type;
2074 sll->sll_pkttype = skb->pkt_type;
2075 if (unlikely(po->origdev))
2076 sll->sll_ifindex = orig_dev->ifindex;
2077 else
2078 sll->sll_ifindex = dev->ifindex;
2079
2080 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2081
2082 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2083 * Use their space for storing the original skb length.
2084 */
2085 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2086
2087 if (pskb_trim(skb, snaplen))
2088 goto drop_n_acct;
2089
2090 skb_set_owner_r(skb, sk);
2091 skb->dev = NULL;
2092 skb_dst_drop(skb);
2093
2094 /* drop conntrack reference */
2095 nf_reset(skb);
2096
2097 spin_lock(&sk->sk_receive_queue.lock);
2098 po->stats.stats1.tp_packets++;
2099 sock_skb_set_dropcount(sk, skb);
2100 __skb_queue_tail(&sk->sk_receive_queue, skb);
2101 spin_unlock(&sk->sk_receive_queue.lock);
2102 sk->sk_data_ready(sk);
2103 return 0;
2104
2105 drop_n_acct:
2106 is_drop_n_account = true;
2107 spin_lock(&sk->sk_receive_queue.lock);
2108 po->stats.stats1.tp_drops++;
2109 atomic_inc(&sk->sk_drops);
2110 spin_unlock(&sk->sk_receive_queue.lock);
2111
2112 drop_n_restore:
2113 if (skb_head != skb->data && skb_shared(skb)) {
2114 skb->data = skb_head;
2115 skb->len = skb_len;
2116 }
2117 drop:
2118 if (!is_drop_n_account)
2119 consume_skb(skb);
2120 else
2121 kfree_skb(skb);
2122 return 0;
2123 }
2124
2125 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2126 struct packet_type *pt, struct net_device *orig_dev)
2127 {
2128 struct sock *sk;
2129 struct packet_sock *po;
2130 struct sockaddr_ll *sll;
2131 union tpacket_uhdr h;
2132 u8 *skb_head = skb->data;
2133 int skb_len = skb->len;
2134 unsigned int snaplen, res;
2135 unsigned long status = TP_STATUS_USER;
2136 unsigned short macoff, netoff, hdrlen;
2137 struct sk_buff *copy_skb = NULL;
2138 struct timespec ts;
2139 __u32 ts_status;
2140 bool is_drop_n_account = false;
2141
2142 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2143 * We may add members to them up to the current aligned size without forcing
2144 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2145 */
2146 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2147 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2148
2149 if (skb->pkt_type == PACKET_LOOPBACK)
2150 goto drop;
2151
2152 sk = pt->af_packet_priv;
2153 po = pkt_sk(sk);
2154
2155 if (!net_eq(dev_net(dev), sock_net(sk)))
2156 goto drop;
2157
2158 if (dev->header_ops) {
2159 if (sk->sk_type != SOCK_DGRAM)
2160 skb_push(skb, skb->data - skb_mac_header(skb));
2161 else if (skb->pkt_type == PACKET_OUTGOING) {
2162 /* Special case: outgoing packets have ll header at head */
2163 skb_pull(skb, skb_network_offset(skb));
2164 }
2165 }
2166
2167 snaplen = skb->len;
2168
2169 res = run_filter(skb, sk, snaplen);
2170 if (!res)
2171 goto drop_n_restore;
2172
2173 if (skb->ip_summed == CHECKSUM_PARTIAL)
2174 status |= TP_STATUS_CSUMNOTREADY;
2175 else if (skb->pkt_type != PACKET_OUTGOING &&
2176 (skb->ip_summed == CHECKSUM_COMPLETE ||
2177 skb_csum_unnecessary(skb)))
2178 status |= TP_STATUS_CSUM_VALID;
2179
2180 if (snaplen > res)
2181 snaplen = res;
2182
2183 if (sk->sk_type == SOCK_DGRAM) {
2184 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2185 po->tp_reserve;
2186 } else {
2187 unsigned int maclen = skb_network_offset(skb);
2188 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2189 (maclen < 16 ? 16 : maclen)) +
2190 po->tp_reserve;
2191 if (po->has_vnet_hdr)
2192 netoff += sizeof(struct virtio_net_hdr);
2193 macoff = netoff - maclen;
2194 }
2195 if (po->tp_version <= TPACKET_V2) {
2196 if (macoff + snaplen > po->rx_ring.frame_size) {
2197 if (po->copy_thresh &&
2198 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2199 if (skb_shared(skb)) {
2200 copy_skb = skb_clone(skb, GFP_ATOMIC);
2201 } else {
2202 copy_skb = skb_get(skb);
2203 skb_head = skb->data;
2204 }
2205 if (copy_skb)
2206 skb_set_owner_r(copy_skb, sk);
2207 }
2208 snaplen = po->rx_ring.frame_size - macoff;
2209 if ((int)snaplen < 0)
2210 snaplen = 0;
2211 }
2212 } else if (unlikely(macoff + snaplen >
2213 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2214 u32 nval;
2215
2216 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2217 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2218 snaplen, nval, macoff);
2219 snaplen = nval;
2220 if (unlikely((int)snaplen < 0)) {
2221 snaplen = 0;
2222 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2223 }
2224 }
2225 spin_lock(&sk->sk_receive_queue.lock);
2226 h.raw = packet_current_rx_frame(po, skb,
2227 TP_STATUS_KERNEL, (macoff+snaplen));
2228 if (!h.raw)
2229 goto drop_n_account;
2230 if (po->tp_version <= TPACKET_V2) {
2231 packet_increment_rx_head(po, &po->rx_ring);
2232 /*
2233 * LOSING will be reported until you read the stats,
2234 * because it's COR - Clear On Read.
2235 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2236 * at the packet level.
2237 */
2238 if (po->stats.stats1.tp_drops)
2239 status |= TP_STATUS_LOSING;
2240 }
2241 po->stats.stats1.tp_packets++;
2242 if (copy_skb) {
2243 status |= TP_STATUS_COPY;
2244 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2245 }
2246 spin_unlock(&sk->sk_receive_queue.lock);
2247
2248 if (po->has_vnet_hdr) {
2249 if (__packet_rcv_vnet(skb, h.raw + macoff -
2250 sizeof(struct virtio_net_hdr))) {
2251 spin_lock(&sk->sk_receive_queue.lock);
2252 goto drop_n_account;
2253 }
2254 }
2255
2256 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2257
2258 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2259 getnstimeofday(&ts);
2260
2261 status |= ts_status;
2262
2263 switch (po->tp_version) {
2264 case TPACKET_V1:
2265 h.h1->tp_len = skb->len;
2266 h.h1->tp_snaplen = snaplen;
2267 h.h1->tp_mac = macoff;
2268 h.h1->tp_net = netoff;
2269 h.h1->tp_sec = ts.tv_sec;
2270 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2271 hdrlen = sizeof(*h.h1);
2272 break;
2273 case TPACKET_V2:
2274 h.h2->tp_len = skb->len;
2275 h.h2->tp_snaplen = snaplen;
2276 h.h2->tp_mac = macoff;
2277 h.h2->tp_net = netoff;
2278 h.h2->tp_sec = ts.tv_sec;
2279 h.h2->tp_nsec = ts.tv_nsec;
2280 if (skb_vlan_tag_present(skb)) {
2281 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2282 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2283 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2284 } else {
2285 h.h2->tp_vlan_tci = 0;
2286 h.h2->tp_vlan_tpid = 0;
2287 }
2288 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2289 hdrlen = sizeof(*h.h2);
2290 break;
2291 case TPACKET_V3:
2292 /* tp_next_offset and vlan are already populated above,
2293 * so DON'T clear those fields here.
2294 */
2295 h.h3->tp_status |= status;
2296 h.h3->tp_len = skb->len;
2297 h.h3->tp_snaplen = snaplen;
2298 h.h3->tp_mac = macoff;
2299 h.h3->tp_net = netoff;
2300 h.h3->tp_sec = ts.tv_sec;
2301 h.h3->tp_nsec = ts.tv_nsec;
2302 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2303 hdrlen = sizeof(*h.h3);
2304 break;
2305 default:
2306 BUG();
2307 }
2308
2309 sll = h.raw + TPACKET_ALIGN(hdrlen);
2310 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2311 sll->sll_family = AF_PACKET;
2312 sll->sll_hatype = dev->type;
2313 sll->sll_protocol = skb->protocol;
2314 sll->sll_pkttype = skb->pkt_type;
2315 if (unlikely(po->origdev))
2316 sll->sll_ifindex = orig_dev->ifindex;
2317 else
2318 sll->sll_ifindex = dev->ifindex;
2319
2320 smp_mb();
2321
2322 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2323 if (po->tp_version <= TPACKET_V2) {
2324 u8 *start, *end;
2325
2326 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2327 macoff + snaplen);
2328
2329 for (start = h.raw; start < end; start += PAGE_SIZE)
2330 flush_dcache_page(pgv_to_page(start));
2331 }
2332 smp_wmb();
2333 #endif
2334
2335 if (po->tp_version <= TPACKET_V2) {
2336 __packet_set_status(po, h.raw, status);
2337 sk->sk_data_ready(sk);
2338 } else {
2339 prb_clear_blk_fill_status(&po->rx_ring);
2340 }
2341
2342 drop_n_restore:
2343 if (skb_head != skb->data && skb_shared(skb)) {
2344 skb->data = skb_head;
2345 skb->len = skb_len;
2346 }
2347 drop:
2348 if (!is_drop_n_account)
2349 consume_skb(skb);
2350 else
2351 kfree_skb(skb);
2352 return 0;
2353
2354 drop_n_account:
2355 is_drop_n_account = true;
2356 po->stats.stats1.tp_drops++;
2357 spin_unlock(&sk->sk_receive_queue.lock);
2358
2359 sk->sk_data_ready(sk);
2360 kfree_skb(copy_skb);
2361 goto drop_n_restore;
2362 }
2363
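/* For context: a minimal userspace sketch of the mmap()ed RX ring that
 * tpacket_rcv() above fills in. Illustrative only, not part of this file;
 * it assumes an AF_PACKET socket "fd" already bound to an interface, a
 * TPACKET_V3 ring, and the (arbitrary) geometry chosen below.
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size		= 1 << 22,
 *		.tp_block_nr		= 64,
 *		.tp_frame_size		= 1 << 11,
 *		.tp_frame_nr		= (1 << 22) / (1 << 11) * 64,
 *		.tp_retire_blk_tov	= 60,
 *	};
 *	(tp_retire_blk_tov is the block retire timeout in milliseconds.)
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Each block starts with a struct tpacket_block_desc; once the kernel sets
 * hdr.bh1.block_status to TP_STATUS_USER, userspace walks the contained
 * struct tpacket3_hdr frames via tp_next_offset and finally writes
 * TP_STATUS_KERNEL back to return the block.
 */
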
2364 static void tpacket_destruct_skb(struct sk_buff *skb)
2365 {
2366 struct packet_sock *po = pkt_sk(skb->sk);
2367
2368 if (likely(po->tx_ring.pg_vec)) {
2369 void *ph;
2370 __u32 ts;
2371
2372 ph = skb_shinfo(skb)->destructor_arg;
2373 packet_dec_pending(&po->tx_ring);
2374
2375 ts = __packet_set_timestamp(po, ph, skb);
2376 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2377 }
2378
2379 sock_wfree(skb);
2380 }
2381
2382 static void tpacket_set_protocol(const struct net_device *dev,
2383 struct sk_buff *skb)
2384 {
2385 if (dev->type == ARPHRD_ETHER) {
2386 skb_reset_mac_header(skb);
2387 skb->protocol = eth_hdr(skb)->h_proto;
2388 }
2389 }
2390
2391 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2392 {
2393 unsigned short gso_type = 0;
2394
2395 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2396 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2397 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2398 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2399 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2400 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2401 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2402
2403 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2404 return -EINVAL;
2405
2406 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2407 switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2408 case VIRTIO_NET_HDR_GSO_TCPV4:
2409 gso_type = SKB_GSO_TCPV4;
2410 break;
2411 case VIRTIO_NET_HDR_GSO_TCPV6:
2412 gso_type = SKB_GSO_TCPV6;
2413 break;
2414 case VIRTIO_NET_HDR_GSO_UDP:
2415 gso_type = SKB_GSO_UDP;
2416 break;
2417 default:
2418 return -EINVAL;
2419 }
2420
2421 if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
2422 gso_type |= SKB_GSO_TCP_ECN;
2423
2424 if (vnet_hdr->gso_size == 0)
2425 return -EINVAL;
2426 }
2427
2428 vnet_hdr->gso_type = gso_type; /* changes type, temporary storage */
2429 return 0;
2430 }
2431
2432 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2433 struct virtio_net_hdr *vnet_hdr)
2434 {
2435 int n;
2436
2437 if (*len < sizeof(*vnet_hdr))
2438 return -EINVAL;
2439 *len -= sizeof(*vnet_hdr);
2440
2441 n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
2442 if (n != sizeof(*vnet_hdr))
2443 return -EFAULT;
2444
2445 return __packet_snd_vnet_parse(vnet_hdr, *len);
2446 }
2447
2448 static int packet_snd_vnet_gso(struct sk_buff *skb,
2449 struct virtio_net_hdr *vnet_hdr)
2450 {
2451 if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2452 u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start);
2453 u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset);
2454
2455 if (!skb_partial_csum_set(skb, s, o))
2456 return -EINVAL;
2457 }
2458
2459 skb_shinfo(skb)->gso_size =
2460 __virtio16_to_cpu(vio_le(), vnet_hdr->gso_size);
2461 skb_shinfo(skb)->gso_type = vnet_hdr->gso_type;
2462
2463 /* Header must be checked, and gso_segs computed. */
2464 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2465 skb_shinfo(skb)->gso_segs = 0;
2466 return 0;
2467 }
2468
2469 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2470 void *frame, struct net_device *dev, void *data, int tp_len,
2471 __be16 proto, unsigned char *addr, int hlen, int copylen,
2472 const struct sockcm_cookie *sockc)
2473 {
2474 union tpacket_uhdr ph;
2475 int to_write, offset, len, nr_frags, len_max;
2476 struct socket *sock = po->sk.sk_socket;
2477 struct page *page;
2478 int err;
2479
2480 ph.raw = frame;
2481
2482 skb->protocol = proto;
2483 skb->dev = dev;
2484 skb->priority = po->sk.sk_priority;
2485 skb->mark = po->sk.sk_mark;
2486 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
2487 skb_shinfo(skb)->destructor_arg = ph.raw;
2488
2489 skb_reserve(skb, hlen);
2490 skb_reset_network_header(skb);
2491
2492 to_write = tp_len;
2493
2494 if (sock->type == SOCK_DGRAM) {
2495 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2496 NULL, tp_len);
2497 if (unlikely(err < 0))
2498 return -EINVAL;
2499 } else if (copylen) {
2500 int hdrlen = min_t(int, copylen, tp_len);
2501
2502 skb_push(skb, dev->hard_header_len);
2503 skb_put(skb, copylen - dev->hard_header_len);
2504 err = skb_store_bits(skb, 0, data, hdrlen);
2505 if (unlikely(err))
2506 return err;
2507 if (!dev_validate_header(dev, skb->data, hdrlen))
2508 return -EINVAL;
2509 if (!skb->protocol)
2510 tpacket_set_protocol(dev, skb);
2511
2512 data += hdrlen;
2513 to_write -= hdrlen;
2514 }
2515
2516 offset = offset_in_page(data);
2517 len_max = PAGE_SIZE - offset;
2518 len = ((to_write > len_max) ? len_max : to_write);
2519
2520 skb->data_len = to_write;
2521 skb->len += to_write;
2522 skb->truesize += to_write;
2523 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2524
2525 while (likely(to_write)) {
2526 nr_frags = skb_shinfo(skb)->nr_frags;
2527
2528 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2529 pr_err("Packet exceeds the number of skb frags (%lu)\n",
2530 MAX_SKB_FRAGS);
2531 return -EFAULT;
2532 }
2533
2534 page = pgv_to_page(data);
2535 data += len;
2536 flush_dcache_page(page);
2537 get_page(page);
2538 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2539 to_write -= len;
2540 offset = 0;
2541 len_max = PAGE_SIZE;
2542 len = ((to_write > len_max) ? len_max : to_write);
2543 }
2544
2545 skb_probe_transport_header(skb, 0);
2546
2547 return tp_len;
2548 }
2549
2550 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2551 int size_max, void **data)
2552 {
2553 union tpacket_uhdr ph;
2554 int tp_len, off;
2555
2556 ph.raw = frame;
2557
2558 switch (po->tp_version) {
2559 case TPACKET_V2:
2560 tp_len = ph.h2->tp_len;
2561 break;
2562 default:
2563 tp_len = ph.h1->tp_len;
2564 break;
2565 }
2566 if (unlikely(tp_len > size_max)) {
2567 pr_err("packet size is too large (%d > %d)\n", tp_len, size_max);
2568 return -EMSGSIZE;
2569 }
2570
2571 if (unlikely(po->tp_tx_has_off)) {
2572 int off_min, off_max;
2573
2574 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2575 off_max = po->tx_ring.frame_size - tp_len;
2576 if (po->sk.sk_type == SOCK_DGRAM) {
2577 switch (po->tp_version) {
2578 case TPACKET_V2:
2579 off = ph.h2->tp_net;
2580 break;
2581 default:
2582 off = ph.h1->tp_net;
2583 break;
2584 }
2585 } else {
2586 switch (po->tp_version) {
2587 case TPACKET_V2:
2588 off = ph.h2->tp_mac;
2589 break;
2590 default:
2591 off = ph.h1->tp_mac;
2592 break;
2593 }
2594 }
2595 if (unlikely((off < off_min) || (off_max < off)))
2596 return -EINVAL;
2597 } else {
2598 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2599 }
2600
2601 *data = frame + off;
2602 return tp_len;
2603 }
2604
2605 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2606 {
2607 struct sk_buff *skb;
2608 struct net_device *dev;
2609 struct virtio_net_hdr *vnet_hdr = NULL;
2610 struct sockcm_cookie sockc;
2611 __be16 proto;
2612 int err, reserve = 0;
2613 void *ph;
2614 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2615 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2616 int tp_len, size_max;
2617 unsigned char *addr;
2618 void *data;
2619 int len_sum = 0;
2620 int status = TP_STATUS_AVAILABLE;
2621 int hlen, tlen, copylen = 0;
2622
2623 mutex_lock(&po->pg_vec_lock);
2624
2625 if (likely(saddr == NULL)) {
2626 dev = packet_cached_dev_get(po);
2627 proto = po->num;
2628 addr = NULL;
2629 } else {
2630 err = -EINVAL;
2631 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2632 goto out;
2633 if (msg->msg_namelen < (saddr->sll_halen
2634 + offsetof(struct sockaddr_ll,
2635 sll_addr)))
2636 goto out;
2637 proto = saddr->sll_protocol;
2638 addr = saddr->sll_addr;
2639 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2640 }
2641
2642 sockc.tsflags = po->sk.sk_tsflags;
2643 if (msg->msg_controllen) {
2644 err = sock_cmsg_send(&po->sk, msg, &sockc);
2645 if (unlikely(err))
2646 goto out;
2647 }
2648
2649 err = -ENXIO;
2650 if (unlikely(dev == NULL))
2651 goto out;
2652 err = -ENETDOWN;
2653 if (unlikely(!(dev->flags & IFF_UP)))
2654 goto out_put;
2655
2656 if (po->sk.sk_socket->type == SOCK_RAW)
2657 reserve = dev->hard_header_len;
2658 size_max = po->tx_ring.frame_size
2659 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2660
2661 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2662 size_max = dev->mtu + reserve + VLAN_HLEN;
2663
2664 do {
2665 ph = packet_current_frame(po, &po->tx_ring,
2666 TP_STATUS_SEND_REQUEST);
2667 if (unlikely(ph == NULL)) {
2668 if (need_wait && need_resched())
2669 schedule();
2670 continue;
2671 }
2672
2673 skb = NULL;
2674 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2675 if (tp_len < 0)
2676 goto tpacket_error;
2677
2678 status = TP_STATUS_SEND_REQUEST;
2679 hlen = LL_RESERVED_SPACE(dev);
2680 tlen = dev->needed_tailroom;
2681 if (po->has_vnet_hdr) {
2682 vnet_hdr = data;
2683 data += sizeof(*vnet_hdr);
2684 tp_len -= sizeof(*vnet_hdr);
2685 if (tp_len < 0 ||
2686 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2687 tp_len = -EINVAL;
2688 goto tpacket_error;
2689 }
2690 copylen = __virtio16_to_cpu(vio_le(),
2691 vnet_hdr->hdr_len);
2692 }
2693 copylen = max_t(int, copylen, dev->hard_header_len);
2694 skb = sock_alloc_send_skb(&po->sk,
2695 hlen + tlen + sizeof(struct sockaddr_ll) +
2696 (copylen - dev->hard_header_len),
2697 !need_wait, &err);
2698
2699 if (unlikely(skb == NULL)) {
2700 /* we assume the socket was initially writeable ... */
2701 if (likely(len_sum > 0))
2702 err = len_sum;
2703 goto out_status;
2704 }
2705 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2706 addr, hlen, copylen, &sockc);
2707 if (likely(tp_len >= 0) &&
2708 tp_len > dev->mtu + reserve &&
2709 !po->has_vnet_hdr &&
2710 !packet_extra_vlan_len_allowed(dev, skb))
2711 tp_len = -EMSGSIZE;
2712
2713 if (unlikely(tp_len < 0)) {
2714 tpacket_error:
2715 if (po->tp_loss) {
2716 __packet_set_status(po, ph,
2717 TP_STATUS_AVAILABLE);
2718 packet_increment_head(&po->tx_ring);
2719 kfree_skb(skb);
2720 continue;
2721 } else {
2722 status = TP_STATUS_WRONG_FORMAT;
2723 err = tp_len;
2724 goto out_status;
2725 }
2726 }
2727
2728 if (po->has_vnet_hdr && packet_snd_vnet_gso(skb, vnet_hdr)) {
2729 tp_len = -EINVAL;
2730 goto tpacket_error;
2731 }
2732
2733 packet_pick_tx_queue(dev, skb);
2734
2735 skb->destructor = tpacket_destruct_skb;
2736 __packet_set_status(po, ph, TP_STATUS_SENDING);
2737 packet_inc_pending(&po->tx_ring);
2738
2739 status = TP_STATUS_SEND_REQUEST;
2740 err = po->xmit(skb);
2741 if (unlikely(err > 0)) {
2742 err = net_xmit_errno(err);
2743 if (err && __packet_get_status(po, ph) ==
2744 TP_STATUS_AVAILABLE) {
2745 /* skb was destructed already */
2746 skb = NULL;
2747 goto out_status;
2748 }
2749 /*
2750 * skb was dropped but not destructed yet;
2751 * let's treat it like congestion or err < 0
2752 */
2753 err = 0;
2754 }
2755 packet_increment_head(&po->tx_ring);
2756 len_sum += tp_len;
2757 } while (likely((ph != NULL) ||
2758 /* Note: packet_read_pending() might be slow if we have
2759 * to call it, as it's a per-cpu variable, but in the fast path
2760 * we already short-circuit the loop with the first
2761 * condition, and luckily don't have to go down that path
2762 * anyway.
2763 */
2764 (need_wait && packet_read_pending(&po->tx_ring))));
2765
2766 err = len_sum;
2767 goto out_put;
2768
2769 out_status:
2770 __packet_set_status(po, ph, status);
2771 kfree_skb(skb);
2772 out_put:
2773 dev_put(dev);
2774 out:
2775 mutex_unlock(&po->pg_vec_lock);
2776 return err;
2777 }
2778
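/* For context: a minimal userspace sketch of driving the TX ring that
 * tpacket_snd() above drains. Illustrative only, not part of this file;
 * it assumes a TPACKET_V2 PACKET_TX_RING has already been set up and
 * mmap()ed, that "frame" points at the next ring slot, and that "buf"
 * holds a complete link-layer frame of "len" bytes.
 *
 *	struct tpacket2_hdr *hdr = frame;
 *	void *data = (char *)frame + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, buf, len);
 *	hdr->tp_len = len;
 *	__sync_synchronize();
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *
 *	send(fd, NULL, 0, 0);
 *
 * The empty send() kicks the kernel; one call may flush many frames marked
 * TP_STATUS_SEND_REQUEST in a single pass.
 */
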
2779 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2780 size_t reserve, size_t len,
2781 size_t linear, int noblock,
2782 int *err)
2783 {
2784 struct sk_buff *skb;
2785
2786 /* Under a page? Don't bother with paged skb. */
2787 if (prepad + len < PAGE_SIZE || !linear)
2788 linear = len;
2789
2790 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2791 err, 0);
2792 if (!skb)
2793 return NULL;
2794
2795 skb_reserve(skb, reserve);
2796 skb_put(skb, linear);
2797 skb->data_len = len - linear;
2798 skb->len += len - linear;
2799
2800 return skb;
2801 }
2802
2803 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2804 {
2805 struct sock *sk = sock->sk;
2806 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2807 struct sk_buff *skb;
2808 struct net_device *dev;
2809 __be16 proto;
2810 unsigned char *addr;
2811 int err, reserve = 0;
2812 struct sockcm_cookie sockc;
2813 struct virtio_net_hdr vnet_hdr = { 0 };
2814 int offset = 0;
2815 struct packet_sock *po = pkt_sk(sk);
2816 int hlen, tlen;
2817 int extra_len = 0;
2818
2819 /*
2820 * Get and verify the address.
2821 */
2822
2823 if (likely(saddr == NULL)) {
2824 dev = packet_cached_dev_get(po);
2825 proto = po->num;
2826 addr = NULL;
2827 } else {
2828 err = -EINVAL;
2829 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2830 goto out;
2831 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2832 goto out;
2833 proto = saddr->sll_protocol;
2834 addr = saddr->sll_addr;
2835 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2836 }
2837
2838 err = -ENXIO;
2839 if (unlikely(dev == NULL))
2840 goto out_unlock;
2841 err = -ENETDOWN;
2842 if (unlikely(!(dev->flags & IFF_UP)))
2843 goto out_unlock;
2844
2845 sockc.tsflags = sk->sk_tsflags;
2846 sockc.mark = sk->sk_mark;
2847 if (msg->msg_controllen) {
2848 err = sock_cmsg_send(sk, msg, &sockc);
2849 if (unlikely(err))
2850 goto out_unlock;
2851 }
2852
2853 if (sock->type == SOCK_RAW)
2854 reserve = dev->hard_header_len;
2855 if (po->has_vnet_hdr) {
2856 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2857 if (err)
2858 goto out_unlock;
2859 }
2860
2861 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2862 if (!netif_supports_nofcs(dev)) {
2863 err = -EPROTONOSUPPORT;
2864 goto out_unlock;
2865 }
2866 extra_len = 4; /* We're doing our own CRC */
2867 }
2868
2869 err = -EMSGSIZE;
2870 if (!vnet_hdr.gso_type &&
2871 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2872 goto out_unlock;
2873
2874 err = -ENOBUFS;
2875 hlen = LL_RESERVED_SPACE(dev);
2876 tlen = dev->needed_tailroom;
2877 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2878 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
2879 msg->msg_flags & MSG_DONTWAIT, &err);
2880 if (skb == NULL)
2881 goto out_unlock;
2882
2883 skb_set_network_header(skb, reserve);
2884
2885 err = -EINVAL;
2886 if (sock->type == SOCK_DGRAM) {
2887 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2888 if (unlikely(offset < 0))
2889 goto out_free;
2890 }
2891
2892 /* Returns -EFAULT on error */
2893 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2894 if (err)
2895 goto out_free;
2896
2897 if (sock->type == SOCK_RAW &&
2898 !dev_validate_header(dev, skb->data, len)) {
2899 err = -EINVAL;
2900 goto out_free;
2901 }
2902
2903 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
2904
2905 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2906 !packet_extra_vlan_len_allowed(dev, skb)) {
2907 err = -EMSGSIZE;
2908 goto out_free;
2909 }
2910
2911 skb->protocol = proto;
2912 skb->dev = dev;
2913 skb->priority = sk->sk_priority;
2914 skb->mark = sockc.mark;
2915
2916 packet_pick_tx_queue(dev, skb);
2917
2918 if (po->has_vnet_hdr) {
2919 err = packet_snd_vnet_gso(skb, &vnet_hdr);
2920 if (err)
2921 goto out_free;
2922 len += sizeof(vnet_hdr);
2923 }
2924
2925 skb_probe_transport_header(skb, reserve);
2926
2927 if (unlikely(extra_len == 4))
2928 skb->no_fcs = 1;
2929
2930 err = po->xmit(skb);
2931 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2932 goto out_unlock;
2933
2934 dev_put(dev);
2935
2936 return len;
2937
2938 out_free:
2939 kfree_skb(skb);
2940 out_unlock:
2941 if (dev)
2942 dev_put(dev);
2943 out:
2944 return err;
2945 }
2946
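/* For context: a minimal userspace sketch of the non-ring transmit path
 * handled by packet_snd() above. Illustrative only, not part of this file;
 * it assumes a SOCK_DGRAM AF_PACKET socket "fd", an interface index
 * "ifindex", a destination MAC "dst_mac[ETH_ALEN]" and an IP payload
 * "payload" of "plen" bytes; the kernel then builds the link-layer header.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family	= AF_PACKET,
 *		.sll_protocol	= htons(ETH_P_IP),
 *		.sll_ifindex	= ifindex,
 *		.sll_halen	= ETH_ALEN,
 *	};
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
 *
 *	if (sendto(fd, payload, plen, 0,
 *		   (struct sockaddr *)&sll, sizeof(sll)) < 0)
 *		perror("sendto");
 */
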
2947 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2948 {
2949 struct sock *sk = sock->sk;
2950 struct packet_sock *po = pkt_sk(sk);
2951
2952 if (po->tx_ring.pg_vec)
2953 return tpacket_snd(po, msg);
2954 else
2955 return packet_snd(sock, msg, len);
2956 }
2957
2958 /*
2959 * Close a PACKET socket. This is fairly simple. We immediately go
2960 * to 'closed' state and remove our protocol entry in the device list.
2961 */
2962
2963 static int packet_release(struct socket *sock)
2964 {
2965 struct sock *sk = sock->sk;
2966 struct packet_sock *po;
2967 struct net *net;
2968 union tpacket_req_u req_u;
2969
2970 if (!sk)
2971 return 0;
2972
2973 net = sock_net(sk);
2974 po = pkt_sk(sk);
2975
2976 mutex_lock(&net->packet.sklist_lock);
2977 sk_del_node_init_rcu(sk);
2978 mutex_unlock(&net->packet.sklist_lock);
2979
2980 preempt_disable();
2981 sock_prot_inuse_add(net, sk->sk_prot, -1);
2982 preempt_enable();
2983
2984 spin_lock(&po->bind_lock);
2985 unregister_prot_hook(sk, false);
2986 packet_cached_dev_reset(po);
2987
2988 if (po->prot_hook.dev) {
2989 dev_put(po->prot_hook.dev);
2990 po->prot_hook.dev = NULL;
2991 }
2992 spin_unlock(&po->bind_lock);
2993
2994 packet_flush_mclist(sk);
2995
2996 if (po->rx_ring.pg_vec) {
2997 memset(&req_u, 0, sizeof(req_u));
2998 packet_set_ring(sk, &req_u, 1, 0);
2999 }
3000
3001 if (po->tx_ring.pg_vec) {
3002 memset(&req_u, 0, sizeof(req_u));
3003 packet_set_ring(sk, &req_u, 1, 1);
3004 }
3005
3006 fanout_release(sk);
3007
3008 synchronize_net();
3009 /*
3010 * Now the socket is dead. No more input will appear.
3011 */
3012 sock_orphan(sk);
3013 sock->sk = NULL;
3014
3015 /* Purge queues */
3016
3017 skb_queue_purge(&sk->sk_receive_queue);
3018 packet_free_pending(po);
3019 sk_refcnt_debug_release(sk);
3020
3021 sock_put(sk);
3022 return 0;
3023 }
3024
3025 /*
3026 * Attach a packet hook.
3027 */
3028
3029 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3030 __be16 proto)
3031 {
3032 struct packet_sock *po = pkt_sk(sk);
3033 struct net_device *dev_curr;
3034 __be16 proto_curr;
3035 bool need_rehook;
3036 struct net_device *dev = NULL;
3037 int ret = 0;
3038 bool unlisted = false;
3039
3040 if (po->fanout)
3041 return -EINVAL;
3042
3043 lock_sock(sk);
3044 spin_lock(&po->bind_lock);
3045 rcu_read_lock();
3046
3047 if (name) {
3048 dev = dev_get_by_name_rcu(sock_net(sk), name);
3049 if (!dev) {
3050 ret = -ENODEV;
3051 goto out_unlock;
3052 }
3053 } else if (ifindex) {
3054 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3055 if (!dev) {
3056 ret = -ENODEV;
3057 goto out_unlock;
3058 }
3059 }
3060
3061 if (dev)
3062 dev_hold(dev);
3063
3064 proto_curr = po->prot_hook.type;
3065 dev_curr = po->prot_hook.dev;
3066
3067 need_rehook = proto_curr != proto || dev_curr != dev;
3068
3069 if (need_rehook) {
3070 if (po->running) {
3071 rcu_read_unlock();
3072 __unregister_prot_hook(sk, true);
3073 rcu_read_lock();
3074 dev_curr = po->prot_hook.dev;
3075 if (dev)
3076 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3077 dev->ifindex);
3078 }
3079
3080 po->num = proto;
3081 po->prot_hook.type = proto;
3082
3083 if (unlikely(unlisted)) {
3084 dev_put(dev);
3085 po->prot_hook.dev = NULL;
3086 po->ifindex = -1;
3087 packet_cached_dev_reset(po);
3088 } else {
3089 po->prot_hook.dev = dev;
3090 po->ifindex = dev ? dev->ifindex : 0;
3091 packet_cached_dev_assign(po, dev);
3092 }
3093 }
3094 if (dev_curr)
3095 dev_put(dev_curr);
3096
3097 if (proto == 0 || !need_rehook)
3098 goto out_unlock;
3099
3100 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3101 register_prot_hook(sk);
3102 } else {
3103 sk->sk_err = ENETDOWN;
3104 if (!sock_flag(sk, SOCK_DEAD))
3105 sk->sk_error_report(sk);
3106 }
3107
3108 out_unlock:
3109 rcu_read_unlock();
3110 spin_unlock(&po->bind_lock);
3111 release_sock(sk);
3112 return ret;
3113 }
3114
3115 /*
3116 * Bind a packet socket to a device
3117 */
3118
3119 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3120 int addr_len)
3121 {
3122 struct sock *sk = sock->sk;
3123 char name[15];
3124
3125 /*
3126 * Check legality
3127 */
3128
3129 if (addr_len != sizeof(struct sockaddr))
3130 return -EINVAL;
3131 strlcpy(name, uaddr->sa_data, sizeof(name));
3132
3133 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3134 }
3135
3136 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3137 {
3138 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3139 struct sock *sk = sock->sk;
3140
3141 /*
3142 * Check legality
3143 */
3144
3145 if (addr_len < sizeof(struct sockaddr_ll))
3146 return -EINVAL;
3147 if (sll->sll_family != AF_PACKET)
3148 return -EINVAL;
3149
3150 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3151 sll->sll_protocol ? : pkt_sk(sk)->num);
3152 }
3153
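/* For context: a minimal userspace sketch of the bind() call that reaches
 * packet_do_bind() via packet_bind() above. Illustrative only, not part of
 * this file; it assumes an AF_PACKET socket "fd" and the interface name
 * "eth0" (if_nametoindex() comes from <net/if.h>, the constants from
 * <linux/if_ether.h> and <linux/if_packet.h>).
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family	= AF_PACKET,
 *		.sll_protocol	= htons(ETH_P_ALL),
 *		.sll_ifindex	= if_nametoindex("eth0"),
 *	};
 *
 *	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
 *		perror("bind");
 */
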
3154 static struct proto packet_proto = {
3155 .name = "PACKET",
3156 .owner = THIS_MODULE,
3157 .obj_size = sizeof(struct packet_sock),
3158 };
3159
3160 /*
3161 * Create a packet socket (SOCK_DGRAM, SOCK_RAW or SOCK_PACKET).
3162 */
3163
3164 static int packet_create(struct net *net, struct socket *sock, int protocol,
3165 int kern)
3166 {
3167 struct sock *sk;
3168 struct packet_sock *po;
3169 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3170 int err;
3171
3172 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3173 return -EPERM;
3174 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3175 sock->type != SOCK_PACKET)
3176 return -ESOCKTNOSUPPORT;
3177
3178 sock->state = SS_UNCONNECTED;
3179
3180 err = -ENOBUFS;
3181 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3182 if (sk == NULL)
3183 goto out;
3184
3185 sock->ops = &packet_ops;
3186 if (sock->type == SOCK_PACKET)
3187 sock->ops = &packet_ops_spkt;
3188
3189 sock_init_data(sock, sk);
3190
3191 po = pkt_sk(sk);
3192 sk->sk_family = PF_PACKET;
3193 po->num = proto;
3194 po->xmit = dev_queue_xmit;
3195
3196 err = packet_alloc_pending(po);
3197 if (err)
3198 goto out2;
3199
3200 packet_cached_dev_reset(po);
3201
3202 sk->sk_destruct = packet_sock_destruct;
3203 sk_refcnt_debug_inc(sk);
3204
3205 /*
3206 * Attach a protocol block
3207 */
3208
3209 spin_lock_init(&po->bind_lock);
3210 mutex_init(&po->pg_vec_lock);
3211 po->rollover = NULL;
3212 po->prot_hook.func = packet_rcv;
3213
3214 if (sock->type == SOCK_PACKET)
3215 po->prot_hook.func = packet_rcv_spkt;
3216
3217 po->prot_hook.af_packet_priv = sk;
3218
3219 if (proto) {
3220 po->prot_hook.type = proto;
3221 register_prot_hook(sk);
3222 }
3223
3224 mutex_lock(&net->packet.sklist_lock);
3225 sk_add_node_rcu(sk, &net->packet.sklist);
3226 mutex_unlock(&net->packet.sklist_lock);
3227
3228 preempt_disable();
3229 sock_prot_inuse_add(net, &packet_proto, 1);
3230 preempt_enable();
3231
3232 return 0;
3233 out2:
3234 sk_free(sk);
3235 out:
3236 return err;
3237 }
3238
3239 /*
3240 * Pull a packet from our receive queue and hand it to the user.
3241 * If necessary we block.
3242 */
3243
3244 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3245 int flags)
3246 {
3247 struct sock *sk = sock->sk;
3248 struct sk_buff *skb;
3249 int copied, err;
3250 int vnet_hdr_len = 0;
3251 unsigned int origlen = 0;
3252
3253 err = -EINVAL;
3254 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3255 goto out;
3256
3257 #if 0
3258 /* What error should we return now? EUNATTACH? */
3259 if (pkt_sk(sk)->ifindex < 0)
3260 return -ENODEV;
3261 #endif
3262
3263 if (flags & MSG_ERRQUEUE) {
3264 err = sock_recv_errqueue(sk, msg, len,
3265 SOL_PACKET, PACKET_TX_TIMESTAMP);
3266 goto out;
3267 }
3268
3269 /*
3270 * Call the generic datagram receiver. This handles all sorts
3271 * of horrible races and re-entrancy so we can forget about it
3272 * in the protocol layers.
3273 *
3274 * Now it will return ENETDOWN, if the device has just gone down,
3275 * but then it will block.
3276 */
3277
3278 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3279
3280 /*
3281 * If an error occurred, return it. Because skb_recv_datagram()
3282 * handles the blocking, we don't need to see or worry about
3283 * blocking retries.
3284 */
3285
3286 if (skb == NULL)
3287 goto out;
3288
3289 if (pkt_sk(sk)->pressure)
3290 packet_rcv_has_room(pkt_sk(sk), NULL);
3291
3292 if (pkt_sk(sk)->has_vnet_hdr) {
3293 err = packet_rcv_vnet(msg, skb, &len);
3294 if (err)
3295 goto out_free;
3296 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3297 }
3298
3299 /* You lose any data beyond the buffer you gave. If this worries
3300 * a user program, it can ask the device for its MTU
3301 * anyway.
3302 */
3303 copied = skb->len;
3304 if (copied > len) {
3305 copied = len;
3306 msg->msg_flags |= MSG_TRUNC;
3307 }
3308
3309 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3310 if (err)
3311 goto out_free;
3312
3313 if (sock->type != SOCK_PACKET) {
3314 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3315
3316 /* Original length was stored in sockaddr_ll fields */
3317 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3318 sll->sll_family = AF_PACKET;
3319 sll->sll_protocol = skb->protocol;
3320 }
3321
3322 sock_recv_ts_and_drops(msg, sk, skb);
3323
3324 if (msg->msg_name) {
3325 /* If the address length field is there to be filled
3326 * in, we fill it in now.
3327 */
3328 if (sock->type == SOCK_PACKET) {
3329 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3330 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3331 } else {
3332 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3333
3334 msg->msg_namelen = sll->sll_halen +
3335 offsetof(struct sockaddr_ll, sll_addr);
3336 }
3337 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3338 msg->msg_namelen);
3339 }
3340
3341 if (pkt_sk(sk)->auxdata) {
3342 struct tpacket_auxdata aux;
3343
3344 aux.tp_status = TP_STATUS_USER;
3345 if (skb->ip_summed == CHECKSUM_PARTIAL)
3346 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3347 else if (skb->pkt_type != PACKET_OUTGOING &&
3348 (skb->ip_summed == CHECKSUM_COMPLETE ||
3349 skb_csum_unnecessary(skb)))
3350 aux.tp_status |= TP_STATUS_CSUM_VALID;
3351
3352 aux.tp_len = origlen;
3353 aux.tp_snaplen = skb->len;
3354 aux.tp_mac = 0;
3355 aux.tp_net = skb_network_offset(skb);
3356 if (skb_vlan_tag_present(skb)) {
3357 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3358 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3359 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3360 } else {
3361 aux.tp_vlan_tci = 0;
3362 aux.tp_vlan_tpid = 0;
3363 }
3364 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3365 }
3366
3367 /*
3368 * Free or return the buffer as appropriate. Again this
3369 * hides all the races and re-entrancy issues from us.
3370 */
3371 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3372
3373 out_free:
3374 skb_free_datagram(sk, skb);
3375 out:
3376 return err;
3377 }
3378
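/* For context: a minimal userspace sketch of reading the PACKET_AUXDATA
 * control message that packet_recvmsg() above emits. Illustrative only,
 * not part of this file; it assumes an AF_PACKET socket "fd" on which
 * PACKET_AUXDATA has already been enabled with setsockopt().
 *
 *	char frame[2048], ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *			(aux->tp_snaplen, aux->tp_vlan_tci etc. are valid here)
 *		}
 *	}
 */
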
3379 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3380 int *uaddr_len, int peer)
3381 {
3382 struct net_device *dev;
3383 struct sock *sk = sock->sk;
3384
3385 if (peer)
3386 return -EOPNOTSUPP;
3387
3388 uaddr->sa_family = AF_PACKET;
3389 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3390 rcu_read_lock();
3391 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3392 if (dev)
3393 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3394 rcu_read_unlock();
3395 *uaddr_len = sizeof(*uaddr);
3396
3397 return 0;
3398 }
3399
3400 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3401 int *uaddr_len, int peer)
3402 {
3403 struct net_device *dev;
3404 struct sock *sk = sock->sk;
3405 struct packet_sock *po = pkt_sk(sk);
3406 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3407
3408 if (peer)
3409 return -EOPNOTSUPP;
3410
3411 sll->sll_family = AF_PACKET;
3412 sll->sll_ifindex = po->ifindex;
3413 sll->sll_protocol = po->num;
3414 sll->sll_pkttype = 0;
3415 rcu_read_lock();
3416 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3417 if (dev) {
3418 sll->sll_hatype = dev->type;
3419 sll->sll_halen = dev->addr_len;
3420 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3421 } else {
3422 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3423 sll->sll_halen = 0;
3424 }
3425 rcu_read_unlock();
3426 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3427
3428 return 0;
3429 }
3430
3431 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3432 int what)
3433 {
3434 switch (i->type) {
3435 case PACKET_MR_MULTICAST:
3436 if (i->alen != dev->addr_len)
3437 return -EINVAL;
3438 if (what > 0)
3439 return dev_mc_add(dev, i->addr);
3440 else
3441 return dev_mc_del(dev, i->addr);
3442 break;
3443 case PACKET_MR_PROMISC:
3444 return dev_set_promiscuity(dev, what);
3445 case PACKET_MR_ALLMULTI:
3446 return dev_set_allmulti(dev, what);
3447 case PACKET_MR_UNICAST:
3448 if (i->alen != dev->addr_len)
3449 return -EINVAL;
3450 if (what > 0)
3451 return dev_uc_add(dev, i->addr);
3452 else
3453 return dev_uc_del(dev, i->addr);
3454 break;
3455 default:
3456 break;
3457 }
3458 return 0;
3459 }
3460
3461 static void packet_dev_mclist_delete(struct net_device *dev,
3462 struct packet_mclist **mlp)
3463 {
3464 struct packet_mclist *ml;
3465
3466 while ((ml = *mlp) != NULL) {
3467 if (ml->ifindex == dev->ifindex) {
3468 packet_dev_mc(dev, ml, -1);
3469 *mlp = ml->next;
3470 kfree(ml);
3471 } else
3472 mlp = &ml->next;
3473 }
3474 }
3475
3476 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3477 {
3478 struct packet_sock *po = pkt_sk(sk);
3479 struct packet_mclist *ml, *i;
3480 struct net_device *dev;
3481 int err;
3482
3483 rtnl_lock();
3484
3485 err = -ENODEV;
3486 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3487 if (!dev)
3488 goto done;
3489
3490 err = -EINVAL;
3491 if (mreq->mr_alen > dev->addr_len)
3492 goto done;
3493
3494 err = -ENOBUFS;
3495 i = kmalloc(sizeof(*i), GFP_KERNEL);
3496 if (i == NULL)
3497 goto done;
3498
3499 err = 0;
3500 for (ml = po->mclist; ml; ml = ml->next) {
3501 if (ml->ifindex == mreq->mr_ifindex &&
3502 ml->type == mreq->mr_type &&
3503 ml->alen == mreq->mr_alen &&
3504 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3505 ml->count++;
3506 /* Free the new element ... */
3507 kfree(i);
3508 goto done;
3509 }
3510 }
3511
3512 i->type = mreq->mr_type;
3513 i->ifindex = mreq->mr_ifindex;
3514 i->alen = mreq->mr_alen;
3515 memcpy(i->addr, mreq->mr_address, i->alen);
3516 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3517 i->count = 1;
3518 i->next = po->mclist;
3519 po->mclist = i;
3520 err = packet_dev_mc(dev, i, 1);
3521 if (err) {
3522 po->mclist = i->next;
3523 kfree(i);
3524 }
3525
3526 done:
3527 rtnl_unlock();
3528 return err;
3529 }
3530
3531 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3532 {
3533 struct packet_mclist *ml, **mlp;
3534
3535 rtnl_lock();
3536
3537 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3538 if (ml->ifindex == mreq->mr_ifindex &&
3539 ml->type == mreq->mr_type &&
3540 ml->alen == mreq->mr_alen &&
3541 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3542 if (--ml->count == 0) {
3543 struct net_device *dev;
3544 *mlp = ml->next;
3545 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3546 if (dev)
3547 packet_dev_mc(dev, ml, -1);
3548 kfree(ml);
3549 }
3550 break;
3551 }
3552 }
3553 rtnl_unlock();
3554 return 0;
3555 }
3556
3557 static void packet_flush_mclist(struct sock *sk)
3558 {
3559 struct packet_sock *po = pkt_sk(sk);
3560 struct packet_mclist *ml;
3561
3562 if (!po->mclist)
3563 return;
3564
3565 rtnl_lock();
3566 while ((ml = po->mclist) != NULL) {
3567 struct net_device *dev;
3568
3569 po->mclist = ml->next;
3570 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3571 if (dev != NULL)
3572 packet_dev_mc(dev, ml, -1);
3573 kfree(ml);
3574 }
3575 rtnl_unlock();
3576 }
3577
3578 static int
3579 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3580 {
3581 struct sock *sk = sock->sk;
3582 struct packet_sock *po = pkt_sk(sk);
3583 int ret;
3584
3585 if (level != SOL_PACKET)
3586 return -ENOPROTOOPT;
3587
3588 switch (optname) {
3589 case PACKET_ADD_MEMBERSHIP:
3590 case PACKET_DROP_MEMBERSHIP:
3591 {
3592 struct packet_mreq_max mreq;
3593 int len = optlen;
3594 memset(&mreq, 0, sizeof(mreq));
3595 if (len < sizeof(struct packet_mreq))
3596 return -EINVAL;
3597 if (len > sizeof(mreq))
3598 len = sizeof(mreq);
3599 if (copy_from_user(&mreq, optval, len))
3600 return -EFAULT;
3601 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3602 return -EINVAL;
3603 if (optname == PACKET_ADD_MEMBERSHIP)
3604 ret = packet_mc_add(sk, &mreq);
3605 else
3606 ret = packet_mc_drop(sk, &mreq);
3607 return ret;
3608 }
3609
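/* For context: a minimal userspace sketch of the membership option handled
 * just above. Illustrative only, not part of this file; it assumes an
 * AF_PACKET socket "fd" and an interface index "ifindex". PACKET_MR_PROMISC
 * requests promiscuous mode on that interface, reference-counted by the
 * kernel.
 *
 *	struct packet_mreq mr = {
 *		.mr_ifindex	= ifindex,
 *		.mr_type	= PACKET_MR_PROMISC,
 *	};
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		       &mr, sizeof(mr)) < 0)
 *		perror("PACKET_ADD_MEMBERSHIP");
 */
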
3610 case PACKET_RX_RING:
3611 case PACKET_TX_RING:
3612 {
3613 union tpacket_req_u req_u;
3614 int len;
3615
3616 switch (po->tp_version) {
3617 case TPACKET_V1:
3618 case TPACKET_V2:
3619 len = sizeof(req_u.req);
3620 break;
3621 case TPACKET_V3:
3622 default:
3623 len = sizeof(req_u.req3);
3624 break;
3625 }
3626 if (optlen < len)
3627 return -EINVAL;
3628 if (copy_from_user(&req_u.req, optval, len))
3629 return -EFAULT;
3630 return packet_set_ring(sk, &req_u, 0,
3631 optname == PACKET_TX_RING);
3632 }
3633 case PACKET_COPY_THRESH:
3634 {
3635 int val;
3636
3637 if (optlen != sizeof(val))
3638 return -EINVAL;
3639 if (copy_from_user(&val, optval, sizeof(val)))
3640 return -EFAULT;
3641
3642 pkt_sk(sk)->copy_thresh = val;
3643 return 0;
3644 }
3645 case PACKET_VERSION:
3646 {
3647 int val;
3648
3649 if (optlen != sizeof(val))
3650 return -EINVAL;
3651 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3652 return -EBUSY;
3653 if (copy_from_user(&val, optval, sizeof(val)))
3654 return -EFAULT;
3655 switch (val) {
3656 case TPACKET_V1:
3657 case TPACKET_V2:
3658 case TPACKET_V3:
3659 po->tp_version = val;
3660 return 0;
3661 default:
3662 return -EINVAL;
3663 }
3664 }
3665 case PACKET_RESERVE:
3666 {
3667 unsigned int val;
3668
3669 if (optlen != sizeof(val))
3670 return -EINVAL;
3671 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3672 return -EBUSY;
3673 if (copy_from_user(&val, optval, sizeof(val)))
3674 return -EFAULT;
3675 po->tp_reserve = val;
3676 return 0;
3677 }
3678 case PACKET_LOSS:
3679 {
3680 unsigned int val;
3681
3682 if (optlen != sizeof(val))
3683 return -EINVAL;
3684 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3685 return -EBUSY;
3686 if (copy_from_user(&val, optval, sizeof(val)))
3687 return -EFAULT;
3688 po->tp_loss = !!val;
3689 return 0;
3690 }
3691 case PACKET_AUXDATA:
3692 {
3693 int val;
3694
3695 if (optlen < sizeof(val))
3696 return -EINVAL;
3697 if (copy_from_user(&val, optval, sizeof(val)))
3698 return -EFAULT;
3699
3700 po->auxdata = !!val;
3701 return 0;
3702 }
3703 case PACKET_ORIGDEV:
3704 {
3705 int val;
3706
3707 if (optlen < sizeof(val))
3708 return -EINVAL;
3709 if (copy_from_user(&val, optval, sizeof(val)))
3710 return -EFAULT;
3711
3712 po->origdev = !!val;
3713 return 0;
3714 }
3715 case PACKET_VNET_HDR:
3716 {
3717 int val;
3718
3719 if (sock->type != SOCK_RAW)
3720 return -EINVAL;
3721 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3722 return -EBUSY;
3723 if (optlen < sizeof(val))
3724 return -EINVAL;
3725 if (copy_from_user(&val, optval, sizeof(val)))
3726 return -EFAULT;
3727
3728 po->has_vnet_hdr = !!val;
3729 return 0;
3730 }
3731 case PACKET_TIMESTAMP:
3732 {
3733 int val;
3734
3735 if (optlen != sizeof(val))
3736 return -EINVAL;
3737 if (copy_from_user(&val, optval, sizeof(val)))
3738 return -EFAULT;
3739
3740 po->tp_tstamp = val;
3741 return 0;
3742 }
3743 case PACKET_FANOUT:
3744 {
3745 int val;
3746
3747 if (optlen != sizeof(val))
3748 return -EINVAL;
3749 if (copy_from_user(&val, optval, sizeof(val)))
3750 return -EFAULT;
3751
3752 return fanout_add(sk, val & 0xffff, val >> 16);
3753 }
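/* For context: a minimal userspace sketch of joining a fanout group via the
 * option handled just above (group id in the low 16 bits, mode in the high
 * bits). Illustrative only, not part of this file; it assumes an AF_PACKET
 * socket "fd" that is already bound, and the arbitrary group id 42.
 *
 *	int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		       &fanout_arg, sizeof(fanout_arg)) < 0)
 *		perror("PACKET_FANOUT");
 */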
3754 case PACKET_FANOUT_DATA:
3755 {
3756 if (!po->fanout)
3757 return -EINVAL;
3758
3759 return fanout_set_data(po, optval, optlen);
3760 }
3761 case PACKET_TX_HAS_OFF:
3762 {
3763 unsigned int val;
3764
3765 if (optlen != sizeof(val))
3766 return -EINVAL;
3767 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3768 return -EBUSY;
3769 if (copy_from_user(&val, optval, sizeof(val)))
3770 return -EFAULT;
3771 po->tp_tx_has_off = !!val;
3772 return 0;
3773 }
3774 case PACKET_QDISC_BYPASS:
3775 {
3776 int val;
3777
3778 if (optlen != sizeof(val))
3779 return -EINVAL;
3780 if (copy_from_user(&val, optval, sizeof(val)))
3781 return -EFAULT;
3782
3783 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3784 return 0;
3785 }
3786 default:
3787 return -ENOPROTOOPT;
3788 }
3789 }
3790
3791 static int packet_getsockopt(struct socket *sock, int level, int optname,
3792 char __user *optval, int __user *optlen)
3793 {
3794 int len;
3795 int val, lv = sizeof(val);
3796 struct sock *sk = sock->sk;
3797 struct packet_sock *po = pkt_sk(sk);
3798 void *data = &val;
3799 union tpacket_stats_u st;
3800 struct tpacket_rollover_stats rstats;
3801
3802 if (level != SOL_PACKET)
3803 return -ENOPROTOOPT;
3804
3805 if (get_user(len, optlen))
3806 return -EFAULT;
3807
3808 if (len < 0)
3809 return -EINVAL;
3810
3811 switch (optname) {
3812 case PACKET_STATISTICS:
3813 spin_lock_bh(&sk->sk_receive_queue.lock);
3814 memcpy(&st, &po->stats, sizeof(st));
3815 memset(&po->stats, 0, sizeof(po->stats));
3816 spin_unlock_bh(&sk->sk_receive_queue.lock);
3817
3818 if (po->tp_version == TPACKET_V3) {
3819 lv = sizeof(struct tpacket_stats_v3);
3820 st.stats3.tp_packets += st.stats3.tp_drops;
3821 data = &st.stats3;
3822 } else {
3823 lv = sizeof(struct tpacket_stats);
3824 st.stats1.tp_packets += st.stats1.tp_drops;
3825 data = &st.stats1;
3826 }
3827
3828 break;
3829 case PACKET_AUXDATA:
3830 val = po->auxdata;
3831 break;
3832 case PACKET_ORIGDEV:
3833 val = po->origdev;
3834 break;
3835 case PACKET_VNET_HDR:
3836 val = po->has_vnet_hdr;
3837 break;
3838 case PACKET_VERSION:
3839 val = po->tp_version;
3840 break;
3841 case PACKET_HDRLEN:
3842 if (len > sizeof(int))
3843 len = sizeof(int);
3844 if (copy_from_user(&val, optval, len))
3845 return -EFAULT;
3846 switch (val) {
3847 case TPACKET_V1:
3848 val = sizeof(struct tpacket_hdr);
3849 break;
3850 case TPACKET_V2:
3851 val = sizeof(struct tpacket2_hdr);
3852 break;
3853 case TPACKET_V3:
3854 val = sizeof(struct tpacket3_hdr);
3855 break;
3856 default:
3857 return -EINVAL;
3858 }
3859 break;
3860 case PACKET_RESERVE:
3861 val = po->tp_reserve;
3862 break;
3863 case PACKET_LOSS:
3864 val = po->tp_loss;
3865 break;
3866 case PACKET_TIMESTAMP:
3867 val = po->tp_tstamp;
3868 break;
3869 case PACKET_FANOUT:
3870 val = (po->fanout ?
3871 ((u32)po->fanout->id |
3872 ((u32)po->fanout->type << 16) |
3873 ((u32)po->fanout->flags << 24)) :
3874 0);
3875 break;
3876 case PACKET_ROLLOVER_STATS:
3877 if (!po->rollover)
3878 return -EINVAL;
3879 rstats.tp_all = atomic_long_read(&po->rollover->num);
3880 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3881 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3882 data = &rstats;
3883 lv = sizeof(rstats);
3884 break;
3885 case PACKET_TX_HAS_OFF:
3886 val = po->tp_tx_has_off;
3887 break;
3888 case PACKET_QDISC_BYPASS:
3889 val = packet_use_direct_xmit(po);
3890 break;
3891 default:
3892 return -ENOPROTOOPT;
3893 }
3894
3895 if (len > lv)
3896 len = lv;
3897 if (put_user(len, optlen))
3898 return -EFAULT;
3899 if (copy_to_user(optval, data, len))
3900 return -EFAULT;
3901 return 0;
3902 }
3903
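/* For context: a minimal userspace sketch of reading the PACKET_STATISTICS
 * counters returned by packet_getsockopt() above. Illustrative only, not
 * part of this file; it assumes an AF_PACKET socket "fd" using TPACKET_V1/V2
 * (TPACKET_V3 returns the larger struct tpacket_stats_v3 instead). Note that
 * the counters are reset on every read.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("received %u, dropped %u\n", st.tp_packets, st.tp_drops);
 */
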
3904
3905 #ifdef CONFIG_COMPAT
3906 static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
3907 char __user *optval, unsigned int optlen)
3908 {
3909 struct packet_sock *po = pkt_sk(sock->sk);
3910
3911 if (level != SOL_PACKET)
3912 return -ENOPROTOOPT;
3913
3914 if (optname == PACKET_FANOUT_DATA &&
3915 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
3916 optval = (char __user *)get_compat_bpf_fprog(optval);
3917 if (!optval)
3918 return -EFAULT;
3919 optlen = sizeof(struct sock_fprog);
3920 }
3921
3922 return packet_setsockopt(sock, level, optname, optval, optlen);
3923 }
3924 #endif
3925
3926 static int packet_notifier(struct notifier_block *this,
3927 unsigned long msg, void *ptr)
3928 {
3929 struct sock *sk;
3930 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3931 struct net *net = dev_net(dev);
3932
3933 rcu_read_lock();
3934 sk_for_each_rcu(sk, &net->packet.sklist) {
3935 struct packet_sock *po = pkt_sk(sk);
3936
3937 switch (msg) {
3938 case NETDEV_UNREGISTER:
3939 if (po->mclist)
3940 packet_dev_mclist_delete(dev, &po->mclist);
3941 /* fallthrough */
3942
3943 case NETDEV_DOWN:
3944 if (dev->ifindex == po->ifindex) {
3945 spin_lock(&po->bind_lock);
3946 if (po->running) {
3947 __unregister_prot_hook(sk, false);
3948 sk->sk_err = ENETDOWN;
3949 if (!sock_flag(sk, SOCK_DEAD))
3950 sk->sk_error_report(sk);
3951 }
3952 if (msg == NETDEV_UNREGISTER) {
3953 packet_cached_dev_reset(po);
3954 fanout_release(sk);
3955 po->ifindex = -1;
3956 if (po->prot_hook.dev)
3957 dev_put(po->prot_hook.dev);
3958 po->prot_hook.dev = NULL;
3959 }
3960 spin_unlock(&po->bind_lock);
3961 }
3962 break;
3963 case NETDEV_UP:
3964 if (dev->ifindex == po->ifindex) {
3965 spin_lock(&po->bind_lock);
3966 if (po->num)
3967 register_prot_hook(sk);
3968 spin_unlock(&po->bind_lock);
3969 }
3970 break;
3971 }
3972 }
3973 rcu_read_unlock();
3974 return NOTIFY_DONE;
3975 }
3976
3977
3978 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3979 unsigned long arg)
3980 {
3981 struct sock *sk = sock->sk;
3982
3983 switch (cmd) {
3984 case SIOCOUTQ:
3985 {
3986 int amount = sk_wmem_alloc_get(sk);
3987
3988 return put_user(amount, (int __user *)arg);
3989 }
3990 case SIOCINQ:
3991 {
3992 struct sk_buff *skb;
3993 int amount = 0;
3994
3995 spin_lock_bh(&sk->sk_receive_queue.lock);
3996 skb = skb_peek(&sk->sk_receive_queue);
3997 if (skb)
3998 amount = skb->len;
3999 spin_unlock_bh(&sk->sk_receive_queue.lock);
4000 return put_user(amount, (int __user *)arg);
4001 }
4002 case SIOCGSTAMP:
4003 return sock_get_timestamp(sk, (struct timeval __user *)arg);
4004 case SIOCGSTAMPNS:
4005 return sock_get_timestampns(sk, (struct timespec __user *)arg);
4006
4007 #ifdef CONFIG_INET
4008 case SIOCADDRT:
4009 case SIOCDELRT:
4010 case SIOCDARP:
4011 case SIOCGARP:
4012 case SIOCSARP:
4013 case SIOCGIFADDR:
4014 case SIOCSIFADDR:
4015 case SIOCGIFBRDADDR:
4016 case SIOCSIFBRDADDR:
4017 case SIOCGIFNETMASK:
4018 case SIOCSIFNETMASK:
4019 case SIOCGIFDSTADDR:
4020 case SIOCSIFDSTADDR:
4021 case SIOCSIFFLAGS:
4022 return inet_dgram_ops.ioctl(sock, cmd, arg);
4023 #endif
4024
4025 default:
4026 return -ENOIOCTLCMD;
4027 }
4028 return 0;
4029 }
4030
4031 static unsigned int packet_poll(struct file *file, struct socket *sock,
4032 poll_table *wait)
4033 {
4034 struct sock *sk = sock->sk;
4035 struct packet_sock *po = pkt_sk(sk);
4036 unsigned int mask = datagram_poll(file, sock, wait);
4037
4038 spin_lock_bh(&sk->sk_receive_queue.lock);
4039 if (po->rx_ring.pg_vec) {
4040 if (!packet_previous_rx_frame(po, &po->rx_ring,
4041 TP_STATUS_KERNEL))
4042 mask |= POLLIN | POLLRDNORM;
4043 }
4044 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
4045 po->pressure = 0;
4046 spin_unlock_bh(&sk->sk_receive_queue.lock);
4047 spin_lock_bh(&sk->sk_write_queue.lock);
4048 if (po->tx_ring.pg_vec) {
4049 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4050 mask |= POLLOUT | POLLWRNORM;
4051 }
4052 spin_unlock_bh(&sk->sk_write_queue.lock);
4053 return mask;
4054 }
4055
4056
4057 /* Dirty? Well, I still haven't learned a better way to account
4058 * for user mmaps.
4059 */
4060
4061 static void packet_mm_open(struct vm_area_struct *vma)
4062 {
4063 struct file *file = vma->vm_file;
4064 struct socket *sock = file->private_data;
4065 struct sock *sk = sock->sk;
4066
4067 if (sk)
4068 atomic_inc(&pkt_sk(sk)->mapped);
4069 }
4070
4071 static void packet_mm_close(struct vm_area_struct *vma)
4072 {
4073 struct file *file = vma->vm_file;
4074 struct socket *sock = file->private_data;
4075 struct sock *sk = sock->sk;
4076
4077 if (sk)
4078 atomic_dec(&pkt_sk(sk)->mapped);
4079 }
4080
4081 static const struct vm_operations_struct packet_mmap_ops = {
4082 .open = packet_mm_open,
4083 .close = packet_mm_close,
4084 };
4085
4086 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4087 unsigned int len)
4088 {
4089 int i;
4090
4091 for (i = 0; i < len; i++) {
4092 if (likely(pg_vec[i].buffer)) {
4093 if (is_vmalloc_addr(pg_vec[i].buffer))
4094 vfree(pg_vec[i].buffer);
4095 else
4096 free_pages((unsigned long)pg_vec[i].buffer,
4097 order);
4098 pg_vec[i].buffer = NULL;
4099 }
4100 }
4101 kfree(pg_vec);
4102 }
4103
4104 static char *alloc_one_pg_vec_page(unsigned long order)
4105 {
4106 char *buffer;
4107 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4108 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4109
4110 buffer = (char *) __get_free_pages(gfp_flags, order);
4111 if (buffer)
4112 return buffer;
4113
4114 /* __get_free_pages failed, fall back to vmalloc */
4115 buffer = vzalloc((1 << order) * PAGE_SIZE);
4116 if (buffer)
4117 return buffer;
4118
4119 /* vmalloc failed, let's dig into swap here */
4120 gfp_flags &= ~__GFP_NORETRY;
4121 buffer = (char *) __get_free_pages(gfp_flags, order);
4122 if (buffer)
4123 return buffer;
4124
4125 /* complete and utter failure */
4126 return NULL;
4127 }
4128
4129 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4130 {
4131 unsigned int block_nr = req->tp_block_nr;
4132 struct pgv *pg_vec;
4133 int i;
4134
4135 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4136 if (unlikely(!pg_vec))
4137 goto out;
4138
4139 for (i = 0; i < block_nr; i++) {
4140 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4141 if (unlikely(!pg_vec[i].buffer))
4142 goto out_free_pgvec;
4143 }
4144
4145 out:
4146 return pg_vec;
4147
4148 out_free_pgvec:
4149 free_pg_vec(pg_vec, order, block_nr);
4150 pg_vec = NULL;
4151 goto out;
4152 }
4153
4154 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4155 int closing, int tx_ring)
4156 {
4157 struct pgv *pg_vec = NULL;
4158 struct packet_sock *po = pkt_sk(sk);
4159 int was_running, order = 0;
4160 struct packet_ring_buffer *rb;
4161 struct sk_buff_head *rb_queue;
4162 __be16 num;
4163 int err = -EINVAL;
4164 /* Added to minimize code churn */
4165 struct tpacket_req *req = &req_u->req;
4166
4167 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4168 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4169 net_warn_ratelimited("Tx-ring is not supported.\n");
4170 goto out;
4171 }
4172
4173 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4174 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4175
4176 err = -EBUSY;
4177 if (!closing) {
4178 if (atomic_read(&po->mapped))
4179 goto out;
4180 if (packet_read_pending(rb))
4181 goto out;
4182 }
4183
4184 if (req->tp_block_nr) {
4185 /* Sanity tests and some calculations */
4186 err = -EBUSY;
4187 if (unlikely(rb->pg_vec))
4188 goto out;
4189
4190 switch (po->tp_version) {
4191 case TPACKET_V1:
4192 po->tp_hdrlen = TPACKET_HDRLEN;
4193 break;
4194 case TPACKET_V2:
4195 po->tp_hdrlen = TPACKET2_HDRLEN;
4196 break;
4197 case TPACKET_V3:
4198 po->tp_hdrlen = TPACKET3_HDRLEN;
4199 break;
4200 }
4201
4202 err = -EINVAL;
4203 if (unlikely((int)req->tp_block_size <= 0))
4204 goto out;
4205 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4206 goto out;
4207 if (po->tp_version >= TPACKET_V3 &&
4208 (int)(req->tp_block_size -
4209 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
4210 goto out;
4211 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4212 po->tp_reserve))
4213 goto out;
4214 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4215 goto out;
4216
4217 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4218 if (unlikely(rb->frames_per_block == 0))
4219 goto out;
4220 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4221 req->tp_frame_nr))
4222 goto out;
4223
4224 err = -ENOMEM;
4225 order = get_order(req->tp_block_size);
4226 pg_vec = alloc_pg_vec(req, order);
4227 if (unlikely(!pg_vec))
4228 goto out;
4229 switch (po->tp_version) {
4230 case TPACKET_V3:
4231 /* Transmit path is not supported. It was
4232 * rejected above already; re-check here to
4233 * be safe.
4234 */
4234 if (!tx_ring)
4235 init_prb_bdqc(po, rb, pg_vec, req_u);
4236 break;
4237 default:
4238 break;
4239 }
4240 }
4241 /* Done */
4242 else {
4243 err = -EINVAL;
4244 if (unlikely(req->tp_frame_nr))
4245 goto out;
4246 }
4247
4248 lock_sock(sk);
4249
4250 /* Detach socket from network */
4251 spin_lock(&po->bind_lock);
4252 was_running = po->running;
4253 num = po->num;
4254 if (was_running) {
4255 po->num = 0;
4256 __unregister_prot_hook(sk, false);
4257 }
4258 spin_unlock(&po->bind_lock);
4259
4260 synchronize_net();
4261
4262 err = -EBUSY;
4263 mutex_lock(&po->pg_vec_lock);
4264 if (closing || atomic_read(&po->mapped) == 0) {
4265 err = 0;
4266 spin_lock_bh(&rb_queue->lock);
4267 swap(rb->pg_vec, pg_vec);
4268 rb->frame_max = (req->tp_frame_nr - 1);
4269 rb->head = 0;
4270 rb->frame_size = req->tp_frame_size;
4271 spin_unlock_bh(&rb_queue->lock);
4272
4273 swap(rb->pg_vec_order, order);
4274 swap(rb->pg_vec_len, req->tp_block_nr);
4275
4276 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4277 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4278 tpacket_rcv : packet_rcv;
4279 skb_queue_purge(rb_queue);
4280 if (atomic_read(&po->mapped))
4281 pr_err("packet_mmap: vma is busy: %d\n",
4282 atomic_read(&po->mapped));
4283 }
4284 mutex_unlock(&po->pg_vec_lock);
4285
4286 spin_lock(&po->bind_lock);
4287 if (was_running) {
4288 po->num = num;
4289 register_prot_hook(sk);
4290 }
4291 spin_unlock(&po->bind_lock);
4292 if (closing && (po->tp_version > TPACKET_V2)) {
4293 /* Because we don't support block-based V3 on tx-ring */
4294 if (!tx_ring)
4295 prb_shutdown_retire_blk_timer(po, rb_queue);
4296 }
4297 release_sock(sk);
4298
4299 if (pg_vec)
4300 free_pg_vec(pg_vec, order, req->tp_block_nr);
4301 out:
4302 return err;
4303 }
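/*
 * Usage sketch (illustrative only; the numbers are arbitrary example
 * values, not mandated by this file): packet_setsockopt() forwards
 * PACKET_RX_RING and PACKET_TX_RING requests to packet_set_ring(), so a
 * TPACKET_V2 receive ring is typically configured from userspace as:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,	(page-aligned, > 0)
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,	(>= tp_hdrlen + tp_reserve and a
 *					 multiple of TPACKET_ALIGNMENT)
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *	int version = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 * tp_frame_nr must equal frames_per_block * tp_block_nr or the request is
 * rejected with -EINVAL.  With TPACKET_V3 only the receive ring is
 * accepted; a V3 transmit ring is refused at the top of the function.
 */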
4304
4305 static int packet_mmap(struct file *file, struct socket *sock,
4306 struct vm_area_struct *vma)
4307 {
4308 struct sock *sk = sock->sk;
4309 struct packet_sock *po = pkt_sk(sk);
4310 unsigned long size, expected_size;
4311 struct packet_ring_buffer *rb;
4312 unsigned long start;
4313 int err = -EINVAL;
4314 int i;
4315
4316 if (vma->vm_pgoff)
4317 return -EINVAL;
4318
4319 mutex_lock(&po->pg_vec_lock);
4320
4321 expected_size = 0;
4322 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4323 if (rb->pg_vec) {
4324 expected_size += rb->pg_vec_len
4325 * rb->pg_vec_pages
4326 * PAGE_SIZE;
4327 }
4328 }
4329
4330 if (expected_size == 0)
4331 goto out;
4332
4333 size = vma->vm_end - vma->vm_start;
4334 if (size != expected_size)
4335 goto out;
4336
4337 start = vma->vm_start;
4338 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4339 if (rb->pg_vec == NULL)
4340 continue;
4341
4342 for (i = 0; i < rb->pg_vec_len; i++) {
4343 struct page *page;
4344 void *kaddr = rb->pg_vec[i].buffer;
4345 int pg_num;
4346
4347 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4348 page = pgv_to_page(kaddr);
4349 err = vm_insert_page(vma, start, page);
4350 if (unlikely(err))
4351 goto out;
4352 start += PAGE_SIZE;
4353 kaddr += PAGE_SIZE;
4354 }
4355 }
4356 }
4357
4358 atomic_inc(&po->mapped);
4359 vma->vm_ops = &packet_mmap_ops;
4360 err = 0;
4361
4362 out:
4363 mutex_unlock(&po->pg_vec_lock);
4364 return err;
4365 }
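/*
 * Usage sketch (illustrative only, continuing the example above): the ring
 * configured through packet_set_ring() is mapped in a single mmap() call
 * with offset 0; packet_mmap() rejects a non-zero vm_pgoff and requires the
 * length to match the combined rx + tx ring size exactly:
 *
 *	size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * When both rings exist, the rx blocks are inserted first and the tx
 * blocks follow, in block order, one page at a time via vm_insert_page().
 */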
4366
4367 static const struct proto_ops packet_ops_spkt = {
4368 .family = PF_PACKET,
4369 .owner = THIS_MODULE,
4370 .release = packet_release,
4371 .bind = packet_bind_spkt,
4372 .connect = sock_no_connect,
4373 .socketpair = sock_no_socketpair,
4374 .accept = sock_no_accept,
4375 .getname = packet_getname_spkt,
4376 .poll = datagram_poll,
4377 .ioctl = packet_ioctl,
4378 .listen = sock_no_listen,
4379 .shutdown = sock_no_shutdown,
4380 .setsockopt = sock_no_setsockopt,
4381 .getsockopt = sock_no_getsockopt,
4382 .sendmsg = packet_sendmsg_spkt,
4383 .recvmsg = packet_recvmsg,
4384 .mmap = sock_no_mmap,
4385 .sendpage = sock_no_sendpage,
4386 };
4387
4388 static const struct proto_ops packet_ops = {
4389 .family = PF_PACKET,
4390 .owner = THIS_MODULE,
4391 .release = packet_release,
4392 .bind = packet_bind,
4393 .connect = sock_no_connect,
4394 .socketpair = sock_no_socketpair,
4395 .accept = sock_no_accept,
4396 .getname = packet_getname,
4397 .poll = packet_poll,
4398 .ioctl = packet_ioctl,
4399 .listen = sock_no_listen,
4400 .shutdown = sock_no_shutdown,
4401 .setsockopt = packet_setsockopt,
4402 .getsockopt = packet_getsockopt,
4403 #ifdef CONFIG_COMPAT
4404 .compat_setsockopt = compat_packet_setsockopt,
4405 #endif
4406 .sendmsg = packet_sendmsg,
4407 .recvmsg = packet_recvmsg,
4408 .mmap = packet_mmap,
4409 .sendpage = sock_no_sendpage,
4410 };
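/*
 * Illustrative note: packet_ops_spkt backs the legacy SOCK_PACKET
 * interface and stubs out setsockopt/getsockopt/mmap with the sock_no_*
 * helpers, so none of the ring-buffer machinery above is reachable through
 * it.  packet_ops serves AF_PACKET SOCK_RAW and SOCK_DGRAM sockets, e.g.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 * (CAP_NET_RAW is required).  packet_create(), earlier in this file,
 * selects packet_ops_spkt only when the socket type is SOCK_PACKET.
 */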
4411
4412 static const struct net_proto_family packet_family_ops = {
4413 .family = PF_PACKET,
4414 .create = packet_create,
4415 .owner = THIS_MODULE,
4416 };
4417
4418 static struct notifier_block packet_netdev_notifier = {
4419 .notifier_call = packet_notifier,
4420 };
4421
4422 #ifdef CONFIG_PROC_FS
4423
4424 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4425 __acquires(RCU)
4426 {
4427 struct net *net = seq_file_net(seq);
4428
4429 rcu_read_lock();
4430 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4431 }
4432
4433 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4434 {
4435 struct net *net = seq_file_net(seq);
4436 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4437 }
4438
4439 static void packet_seq_stop(struct seq_file *seq, void *v)
4440 __releases(RCU)
4441 {
4442 rcu_read_unlock();
4443 }
4444
4445 static int packet_seq_show(struct seq_file *seq, void *v)
4446 {
4447 if (v == SEQ_START_TOKEN)
4448 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
4449 else {
4450 struct sock *s = sk_entry(v);
4451 const struct packet_sock *po = pkt_sk(s);
4452
4453 seq_printf(seq,
4454 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4455 s,
4456 atomic_read(&s->sk_refcnt),
4457 s->sk_type,
4458 ntohs(po->num),
4459 po->ifindex,
4460 po->running,
4461 atomic_read(&s->sk_rmem_alloc),
4462 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4463 sock_i_ino(s));
4464 }
4465
4466 return 0;
4467 }
4468
4469 static const struct seq_operations packet_seq_ops = {
4470 .start = packet_seq_start,
4471 .next = packet_seq_next,
4472 .stop = packet_seq_stop,
4473 .show = packet_seq_show,
4474 };
4475
4476 static int packet_seq_open(struct inode *inode, struct file *file)
4477 {
4478 return seq_open_net(inode, file, &packet_seq_ops,
4479 sizeof(struct seq_net_private));
4480 }
4481
4482 static const struct file_operations packet_seq_fops = {
4483 .owner = THIS_MODULE,
4484 .open = packet_seq_open,
4485 .read = seq_read,
4486 .llseek = seq_lseek,
4487 .release = seq_release_net,
4488 };
4489
4490 #endif
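/*
 * Illustrative note: the seq_file operations above implement
 * /proc/net/packet.  Each socket contributes one line under the header
 * printed for SEQ_START_TOKEN; with made-up values it looks like:
 *
 *	sk       RefCnt Type Proto  Iface R Rmem   User   Inode
 *	ffff...  3      3    0003   2     1 0      1000   12345
 *
 * Type is sk_type (3 == SOCK_RAW), Proto is ntohs(po->num) printed as four
 * hex digits (0003 == ETH_P_ALL) and R is the po->running flag.
 */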
4491
4492 static int __net_init packet_net_init(struct net *net)
4493 {
4494 mutex_init(&net->packet.sklist_lock);
4495 INIT_HLIST_HEAD(&net->packet.sklist);
4496
4497 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4498 return -ENOMEM;
4499
4500 return 0;
4501 }
4502
4503 static void __net_exit packet_net_exit(struct net *net)
4504 {
4505 remove_proc_entry("packet", net->proc_net);
4506 }
4507
4508 static struct pernet_operations packet_net_ops = {
4509 .init = packet_net_init,
4510 .exit = packet_net_exit,
4511 };
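/*
 * Illustrative note: these pernet operations give every network namespace
 * its own socket list and its own /proc/net/packet entry;
 * packet_net_init() runs when a namespace is created and packet_net_exit()
 * when it is torn down.
 */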
4512
4513
4514 static void __exit packet_exit(void)
4515 {
4516 unregister_netdevice_notifier(&packet_netdev_notifier);
4517 unregister_pernet_subsys(&packet_net_ops);
4518 sock_unregister(PF_PACKET);
4519 proto_unregister(&packet_proto);
4520 }
4521
4522 static int __init packet_init(void)
4523 {
4524 int rc = proto_register(&packet_proto, 0);
4525
4526 if (rc != 0)
4527 goto out;
4528
4529 sock_register(&packet_family_ops);
4530 register_pernet_subsys(&packet_net_ops);
4531 register_netdevice_notifier(&packet_netdev_notifier);
4532 out:
4533 return rc;
4534 }
4535
4536 module_init(packet_init);
4537 module_exit(packet_exit);
4538 MODULE_LICENSE("GPL");
4539 MODULE_ALIAS_NETPROTO(PF_PACKET);
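/*
 * Illustrative note: packet_exit() unregisters in the reverse order of
 * packet_init()'s registrations.  MODULE_ALIAS_NETPROTO(PF_PACKET) lets
 * kmod load this module automatically the first time userspace creates a
 * socket(AF_PACKET, ...) on a kernel where it is built as a module.
 */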