/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>

#include "internal.h"

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); other ones are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

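/* A TPACKET_V3 block is laid out as:
 *
 *	struct tpacket_block_desc	(BLK_HDR_LEN, 8-byte aligned)
 *	optional per-block private area	(tp_sizeof_priv, 8-byte aligned)
 *	packets, each a struct tpacket3_hdr followed by its data
 *
 * so BLK_PLUS_PRIV() gives the offset of the first packet in a block,
 * which is exactly what BLOCK_O2FP() is initialized to in prb_open_block().
 */
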
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

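/* When PACKET_QDISC_BYPASS is set on the socket, tx skbs are handed
 * straight to the driver through packet_direct_xmit() below, skipping
 * the qdisc layer entirely. An illustrative userspace sketch (error
 * handling omitted):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 */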
static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	features = netif_skb_features(skb);
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto drop;

	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held. If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

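/* Ring buffer pages may come from the direct-mapped kernel allocator or
 * from vmalloc, depending on which pg_vec allocation path succeeded, so
 * translate the address accordingly before flushing caches.
 */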
static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

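/* The tp_status word is what hands a frame back and forth between the
 * kernel (TP_STATUS_KERNEL) and user space (TP_STATUS_USER): the memory
 * barriers below order the status update against the frame contents, and
 * the page is flushed so mmap'ed readers observe a consistent view.
 */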
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

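/* Derive the block retire timeout from link speed and block size.
 * Worked example: a 1 Gbps link and a 1 MiB block give
 * mbits = 8, div = 1000/1000 = 1, msec = 1, so the function returns
 * tmo + 1 = 9 ms, roughly the time to fill one block at line rate.
 */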
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

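/* init_prb_bdqc() seeds the per-ring block-descriptor queue state when a
 * TPACKET_V3 rx ring is created via packet_set_ring(). An illustrative
 * userspace sketch of the setup that leads here (assumed example values,
 * error handling omitted):
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size = 1 << 20,	// 1 MiB per block
 *		.tp_block_nr   = 8,
 *		.tp_frame_size = 2048,		// sizing hint; v3 packs
 *		.tp_frame_nr   = 8 * 512,	// variable-size frames
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */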
static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and
				 * restarts the timer.
				 * Thawing/timer-refresh is a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
		struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len
					    )
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if the last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

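/* "Room" probes how far ahead of the reader the ring is: ROOM_NORMAL means
 * the frame (or block) a quarter of the ring ahead (len >> ROOM_POW_OFF) is
 * still owned by the kernel, ROOM_LOW means only the slot at the current
 * head is free, and ROOM_NONE means the ring is full. Fanout rollover
 * decisions key off this.
 */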
static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
					  - (skb ? skb->truesize : 0);
		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;
	bool has_room;

	spin_lock_bh(&po->sk.sk_receive_queue.lock);
	ret = __packet_rcv_has_room(po, skb);
	has_room = ret == ROOM_NORMAL;
	if (po->pressure == has_room)
		po->pressure = !has_room;
	spin_unlock_bh(&po->sk.sk_receive_queue.lock);

	return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

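/* A flow is considered "huge" when more than half of the recent rxhash
 * samples in the rollover history match this skb's hash; such flows are
 * allowed to roll over to a peer socket even when room is merely low,
 * since they would otherwise overwhelm a single ring.
 */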
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(skb_get_hash(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = BPF_PROG_RUN(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

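/* Userspace joins a fanout group with PACKET_FANOUT; the u32 argument packs
 * the 16-bit group id in the low word and the type/flags in the high word.
 * Illustrative sketch (error handling omitted):
 *
 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */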
DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

1552 | static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, | |
1553 | unsigned int len) | |
1554 | { | |
1555 | struct bpf_prog *new; | |
1556 | struct sock_fprog fprog; | |
1557 | int ret; | |
1558 | ||
1559 | if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) | |
1560 | return -EPERM; | |
1561 | if (len != sizeof(fprog)) | |
1562 | return -EINVAL; | |
1563 | if (copy_from_user(&fprog, data, len)) | |
1564 | return -EFAULT; | |
1565 | ||
1566 | ret = bpf_prog_create_from_user(&new, &fprog, NULL); | |
1567 | if (ret) | |
1568 | return ret; | |
1569 | ||
1570 | __fanout_set_data_bpf(po->fanout, new); | |
1571 | return 0; | |
1572 | } | |
1573 | ||
1574 | static int fanout_set_data(struct packet_sock *po, char __user *data, | |
1575 | unsigned int len) | |
1576 | { | |
1577 | switch (po->fanout->type) { | |
1578 | case PACKET_FANOUT_CBPF: | |
1579 | return fanout_set_data_cbpf(po, data, len); | |
1580 | default: | |
1581 | return -EINVAL; | |
1582 | } | |
1583 | } | |
1584 | ||
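/*
 * Illustrative userspace sketch (assumptions flagged): once a socket sits
 * in a PACKET_FANOUT_CBPF group, fanout_set_data() above accepts a classic
 * BPF program via PACKET_FANOUT_DATA; fanout_demux_bpf() then uses the
 * program's return value modulo the member count to pick a socket. The
 * one-instruction program below trivially selects member 0.
 */
#if 0
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_packet.h>

static void install_fanout_prog(int fd)
{
	struct sock_filter insns[] = {
		{ BPF_RET | BPF_K, 0, 0, 0 },	/* return 0: member 0 */
	};
	struct sock_fprog fprog = {
		.len    = 1,
		.filter = insns,
	};

	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &fprog, sizeof(fprog));
}
#endif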
1585 | static void fanout_release_data(struct packet_fanout *f) | |
1586 | { | |
1587 | switch (f->type) { | |
1588 | case PACKET_FANOUT_CBPF: | |
1589 | __fanout_set_data_bpf(f, NULL); | |
1590 | } | |
1591 | } | |
1592 | ||
7736d33f | 1593 | static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
dc99f600 DM |
1594 | { |
1595 | struct packet_sock *po = pkt_sk(sk); | |
1596 | struct packet_fanout *f, *match; | |
7736d33f | 1597 | u8 type = type_flags & 0xff; |
77f65ebd | 1598 | u8 flags = type_flags >> 8; |
dc99f600 DM |
1599 | int err; |
1600 | ||
1601 | switch (type) { | |
77f65ebd WB |
1602 | case PACKET_FANOUT_ROLLOVER: |
1603 | if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) | |
1604 | return -EINVAL; | |
dc99f600 DM |
1605 | case PACKET_FANOUT_HASH: |
1606 | case PACKET_FANOUT_LB: | |
95ec3eb4 | 1607 | case PACKET_FANOUT_CPU: |
5df0ddfb | 1608 | case PACKET_FANOUT_RND: |
2d36097d | 1609 | case PACKET_FANOUT_QM: |
47dceb8e | 1610 | case PACKET_FANOUT_CBPF: |
dc99f600 DM |
1611 | break; |
1612 | default: | |
1613 | return -EINVAL; | |
1614 | } | |
1615 | ||
1616 | if (!po->running) | |
1617 | return -EINVAL; | |
1618 | ||
1619 | if (po->fanout) | |
1620 | return -EALREADY; | |
1621 | ||
4633c9e0 WB |
1622 | if (type == PACKET_FANOUT_ROLLOVER || |
1623 | (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { | |
0648ab70 WB |
1624 | po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); |
1625 | if (!po->rollover) | |
1626 | return -ENOMEM; | |
a9b63918 WB |
1627 | atomic_long_set(&po->rollover->num, 0); |
1628 | atomic_long_set(&po->rollover->num_huge, 0); | |
1629 | atomic_long_set(&po->rollover->num_failed, 0); | |
0648ab70 WB |
1630 | } |
1631 | ||
dc99f600 DM |
1632 | mutex_lock(&fanout_mutex); |
1633 | match = NULL; | |
1634 | list_for_each_entry(f, &fanout_list, list) { | |
1635 | if (f->id == id && | |
1636 | read_pnet(&f->net) == sock_net(sk)) { | |
1637 | match = f; | |
1638 | break; | |
1639 | } | |
1640 | } | |
afe62c68 | 1641 | err = -EINVAL; |
77f65ebd | 1642 | if (match && match->flags != flags) |
afe62c68 | 1643 | goto out; |
dc99f600 | 1644 | if (!match) { |
afe62c68 | 1645 | err = -ENOMEM; |
dc99f600 | 1646 | match = kzalloc(sizeof(*match), GFP_KERNEL); |
afe62c68 ED |
1647 | if (!match) |
1648 | goto out; | |
1649 | write_pnet(&match->net, sock_net(sk)); | |
1650 | match->id = id; | |
1651 | match->type = type; | |
77f65ebd | 1652 | match->flags = flags; |
afe62c68 ED |
1653 | INIT_LIST_HEAD(&match->list); |
1654 | spin_lock_init(&match->lock); | |
1655 | atomic_set(&match->sk_ref, 0); | |
47dceb8e | 1656 | fanout_init_data(match); |
afe62c68 ED |
1657 | match->prot_hook.type = po->prot_hook.type; |
1658 | match->prot_hook.dev = po->prot_hook.dev; | |
1659 | match->prot_hook.func = packet_rcv_fanout; | |
1660 | match->prot_hook.af_packet_priv = match; | |
c0de08d0 | 1661 | match->prot_hook.id_match = match_fanout_group; |
afe62c68 ED |
1662 | dev_add_pack(&match->prot_hook); |
1663 | list_add(&match->list, &fanout_list); | |
dc99f600 | 1664 | } |
afe62c68 ED |
1665 | err = -EINVAL; |
1666 | if (match->type == type && | |
1667 | match->prot_hook.type == po->prot_hook.type && | |
1668 | match->prot_hook.dev == po->prot_hook.dev) { | |
1669 | err = -ENOSPC; | |
1670 | if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) { | |
1671 | __dev_remove_pack(&po->prot_hook); | |
1672 | po->fanout = match; | |
1673 | atomic_inc(&match->sk_ref); | |
1674 | __fanout_link(sk, po); | |
1675 | err = 0; | |
dc99f600 DM |
1676 | } |
1677 | } | |
afe62c68 | 1678 | out: |
dc99f600 | 1679 | mutex_unlock(&fanout_mutex); |
0648ab70 WB |
1680 | if (err) { |
1681 | kfree(po->rollover); | |
1682 | po->rollover = NULL; | |
1683 | } | |
dc99f600 DM |
1684 | return err; |
1685 | } | |
1686 | ||
1687 | static void fanout_release(struct sock *sk) | |
1688 | { | |
1689 | struct packet_sock *po = pkt_sk(sk); | |
1690 | struct packet_fanout *f; | |
1691 | ||
1692 | f = po->fanout; | |
1693 | if (!f) | |
1694 | return; | |
1695 | ||
fff3321d | 1696 | mutex_lock(&fanout_mutex); |
dc99f600 DM |
1697 | po->fanout = NULL; |
1698 | ||
dc99f600 DM |
1699 | if (atomic_dec_and_test(&f->sk_ref)) { |
1700 | list_del(&f->list); | |
1701 | dev_remove_pack(&f->prot_hook); | |
47dceb8e | 1702 | fanout_release_data(f); |
dc99f600 DM |
1703 | kfree(f); |
1704 | } | |
1705 | mutex_unlock(&fanout_mutex); | |
0648ab70 | 1706 | |
59f21118 WB |
1707 | if (po->rollover) |
1708 | kfree_rcu(po->rollover, rcu); | |
dc99f600 | 1709 | } |
1da177e4 | 1710 | |
90ddc4f0 | 1711 | static const struct proto_ops packet_ops; |
1da177e4 | 1712 | |
90ddc4f0 | 1713 | static const struct proto_ops packet_ops_spkt; |
1da177e4 | 1714 | |
40d4e3df ED |
1715 | static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, |
1716 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1717 | { |
1718 | struct sock *sk; | |
1719 | struct sockaddr_pkt *spkt; | |
1720 | ||
1721 | /* | |
1722 | * When we registered the protocol we saved the socket in the data | |
1723 | * field for just this event. | |
1724 | */ | |
1725 | ||
1726 | sk = pt->af_packet_priv; | |
1ce4f28b | 1727 | |
1da177e4 LT |
1728 | /* |
1729 | * Yank back the headers [hope the device set this | |
1730 | * right or kerboom...] | |
1731 | * | |
1732 | * Incoming packets have ll header pulled, | |
1733 | * push it back. | |
1734 | * | |
98e399f8 | 1735 | * For outgoing ones skb->data == skb_mac_header(skb) |
1da177e4 LT |
1736 | * so that this procedure is a noop. | |
1737 | */ | |
1738 | ||
1739 | if (skb->pkt_type == PACKET_LOOPBACK) | |
1740 | goto out; | |
1741 | ||
09ad9bc7 | 1742 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1743 | goto out; |
1744 | ||
40d4e3df ED |
1745 | skb = skb_share_check(skb, GFP_ATOMIC); |
1746 | if (skb == NULL) | |
1da177e4 LT |
1747 | goto oom; |
1748 | ||
1749 | /* drop any routing info */ | |
adf30907 | 1750 | skb_dst_drop(skb); |
1da177e4 | 1751 | |
84531c24 PO |
1752 | /* drop conntrack reference */ |
1753 | nf_reset(skb); | |
1754 | ||
ffbc6111 | 1755 | spkt = &PACKET_SKB_CB(skb)->sa.pkt; |
1da177e4 | 1756 | |
98e399f8 | 1757 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1758 | |
1759 | /* | |
1760 | * The SOCK_PACKET socket receives _all_ frames. | |
1761 | */ | |
1762 | ||
1763 | spkt->spkt_family = dev->type; | |
1764 | strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); | |
1765 | spkt->spkt_protocol = skb->protocol; | |
1766 | ||
1767 | /* | |
1768 | * Charge the memory to the socket. This is done specifically | |
1769 | * to prevent sockets from using up all the memory. | |
1770 | */ | |
1771 | ||
40d4e3df | 1772 | if (sock_queue_rcv_skb(sk, skb) == 0) |
1da177e4 LT |
1773 | return 0; |
1774 | ||
1775 | out: | |
1776 | kfree_skb(skb); | |
1777 | oom: | |
1778 | return 0; | |
1779 | } | |
1780 | ||
1781 | ||
1782 | /* | |
1783 | * Output a raw packet to the device layer. This bypasses all the other | |
1784 | * protocol layers and you must therefore supply it with a complete frame | |
1785 | */ | |
1ce4f28b | 1786 | |
1b784140 YX |
1787 | static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, |
1788 | size_t len) | |
1da177e4 LT |
1789 | { |
1790 | struct sock *sk = sock->sk; | |
342dfc30 | 1791 | DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); |
1a35ca80 | 1792 | struct sk_buff *skb = NULL; |
1da177e4 | 1793 | struct net_device *dev; |
40d4e3df | 1794 | __be16 proto = 0; |
1da177e4 | 1795 | int err; |
3bdc0eba | 1796 | int extra_len = 0; |
1ce4f28b | 1797 | |
1da177e4 | 1798 | /* |
1ce4f28b | 1799 | * Get and verify the address. |
1da177e4 LT |
1800 | */ |
1801 | ||
40d4e3df | 1802 | if (saddr) { |
1da177e4 | 1803 | if (msg->msg_namelen < sizeof(struct sockaddr)) |
40d4e3df ED |
1804 | return -EINVAL; |
1805 | if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) | |
1806 | proto = saddr->spkt_protocol; | |
1807 | } else | |
1808 | return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ | |
1da177e4 LT |
1809 | |
1810 | /* | |
1ce4f28b | 1811 | * Find the device first to size-check against it | |
1da177e4 LT |
1812 | */ |
1813 | ||
de74e92a | 1814 | saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; |
1a35ca80 | 1815 | retry: |
654d1f8a ED |
1816 | rcu_read_lock(); |
1817 | dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); | |
1da177e4 LT |
1818 | err = -ENODEV; |
1819 | if (dev == NULL) | |
1820 | goto out_unlock; | |
1ce4f28b | 1821 | |
d5e76b0a DM |
1822 | err = -ENETDOWN; |
1823 | if (!(dev->flags & IFF_UP)) | |
1824 | goto out_unlock; | |
1825 | ||
1da177e4 | 1826 | /* |
40d4e3df ED |
1827 | * You may not queue a frame bigger than the mtu. This is the lowest level |
1828 | * raw protocol and you must do your own fragmentation at this level. | |
1da177e4 | 1829 | */ |
1ce4f28b | 1830 | |
3bdc0eba BG |
1831 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
1832 | if (!netif_supports_nofcs(dev)) { | |
1833 | err = -EPROTONOSUPPORT; | |
1834 | goto out_unlock; | |
1835 | } | |
1836 | extra_len = 4; /* We're doing our own CRC */ | |
1837 | } | |
1838 | ||
1da177e4 | 1839 | err = -EMSGSIZE; |
3bdc0eba | 1840 | if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) |
1da177e4 LT |
1841 | goto out_unlock; |
1842 | ||
1a35ca80 ED |
1843 | if (!skb) { |
1844 | size_t reserved = LL_RESERVED_SPACE(dev); | |
4ce40912 | 1845 | int tlen = dev->needed_tailroom; |
1a35ca80 ED |
1846 | unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; |
1847 | ||
1848 | rcu_read_unlock(); | |
4ce40912 | 1849 | skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); |
1a35ca80 ED |
1850 | if (skb == NULL) |
1851 | return -ENOBUFS; | |
1852 | /* FIXME: Save some space for broken drivers that write a hard | |
1853 | * header at transmission time by themselves. PPP is the notable | |
1854 | * one here. This should really be fixed at the driver level. | |
1855 | */ | |
1856 | skb_reserve(skb, reserved); | |
1857 | skb_reset_network_header(skb); | |
1858 | ||
1859 | /* Try to align data part correctly */ | |
1860 | if (hhlen) { | |
1861 | skb->data -= hhlen; | |
1862 | skb->tail -= hhlen; | |
1863 | if (len < hhlen) | |
1864 | skb_reset_network_header(skb); | |
1865 | } | |
6ce8e9ce | 1866 | err = memcpy_from_msg(skb_put(skb, len), msg, len); |
1a35ca80 ED |
1867 | if (err) |
1868 | goto out_free; | |
1869 | goto retry; | |
1da177e4 LT |
1870 | } |
1871 | ||
3bdc0eba | 1872 | if (len > (dev->mtu + dev->hard_header_len + extra_len)) { |
57f89bfa BG |
1873 | /* Earlier code assumed this would be a VLAN pkt, |
1874 | * double-check this now that we have the actual | |
1875 | * packet in hand. | |
1876 | */ | |
1877 | struct ethhdr *ehdr; | |
1878 | skb_reset_mac_header(skb); | |
1879 | ehdr = eth_hdr(skb); | |
1880 | if (ehdr->h_proto != htons(ETH_P_8021Q)) { | |
1881 | err = -EMSGSIZE; | |
1882 | goto out_unlock; | |
1883 | } | |
1884 | } | |
1a35ca80 | 1885 | |
1da177e4 LT |
1886 | skb->protocol = proto; |
1887 | skb->dev = dev; | |
1888 | skb->priority = sk->sk_priority; | |
2d37a186 | 1889 | skb->mark = sk->sk_mark; |
bf84a010 DB |
1890 | |
1891 | sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); | |
1da177e4 | 1892 | |
3bdc0eba BG |
1893 | if (unlikely(extra_len == 4)) |
1894 | skb->no_fcs = 1; | |
1895 | ||
40893fd0 | 1896 | skb_probe_transport_header(skb, 0); |
c1aad275 | 1897 | |
1da177e4 | 1898 | dev_queue_xmit(skb); |
654d1f8a | 1899 | rcu_read_unlock(); |
40d4e3df | 1900 | return len; |
1da177e4 | 1901 | |
1da177e4 | 1902 | out_unlock: |
654d1f8a | 1903 | rcu_read_unlock(); |
1a35ca80 ED |
1904 | out_free: |
1905 | kfree_skb(skb); | |
1da177e4 LT |
1906 | return err; |
1907 | } | |
1da177e4 | 1908 | |
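/*
 * Illustrative userspace sketch (legacy API, names assumed): SOCK_PACKET
 * addresses frames by device name through sockaddr_pkt, which is exactly
 * what packet_sendmsg_spkt() above parses before looking the device up.
 */
#if 0
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static void spkt_send(const void *frame, size_t len)
{
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
	struct sockaddr_pkt spkt;

	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
	spkt.spkt_protocol = htons(ETH_P_ALL);

	sendto(fd, frame, len, 0, (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif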
eea49cc9 | 1909 | static unsigned int run_filter(const struct sk_buff *skb, |
62ab0812 | 1910 | const struct sock *sk, |
dbcb5855 | 1911 | unsigned int res) |
1da177e4 LT |
1912 | { |
1913 | struct sk_filter *filter; | |
fda9ef5d | 1914 | |
80f8f102 ED |
1915 | rcu_read_lock(); |
1916 | filter = rcu_dereference(sk->sk_filter); | |
dbcb5855 | 1917 | if (filter != NULL) |
0a14842f | 1918 | res = SK_RUN_FILTER(filter, skb); |
80f8f102 | 1919 | rcu_read_unlock(); |
1da177e4 | 1920 | |
dbcb5855 | 1921 | return res; |
1da177e4 LT |
1922 | } |
1923 | ||
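/*
 * Illustrative sketch: run_filter()'s return value caps snaplen in
 * packet_rcv()/tpacket_rcv() below, so an attached socket filter that
 * accepts with a small constant effectively truncates captured frames.
 * The single-instruction program here accepts everything but keeps only
 * the first 96 bytes.
 */
#if 0
#include <sys/socket.h>
#include <linux/filter.h>

static void snap_to_96(int fd)
{
	struct sock_filter insns[] = {
		{ BPF_RET | BPF_K, 0, 0, 96 },	/* accept, snap at 96 bytes */
	};
	struct sock_fprog fprog = { .len = 1, .filter = insns };

	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
}
#endif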
1924 | /* | |
62ab0812 ED |
1925 | * This function performs lazy skb cloning in the hope that most | |
1926 | * packets are discarded by BPF. | |
1927 | * | |
1928 | * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len | |
1929 | * and skb->cb are mangled. It works because (and until) packets | |
1930 | * falling here are owned by the current CPU. Output packets are cloned | |
1931 | * by dev_queue_xmit_nit(), input packets are processed by net_bh | |
1932 | * sequentially, so that if we return the skb to its original state on | |
1933 | * exit, we will not harm anyone. | |
1da177e4 LT |
1934 | */ |
1935 | ||
40d4e3df ED |
1936 | static int packet_rcv(struct sk_buff *skb, struct net_device *dev, |
1937 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1938 | { |
1939 | struct sock *sk; | |
1940 | struct sockaddr_ll *sll; | |
1941 | struct packet_sock *po; | |
40d4e3df | 1942 | u8 *skb_head = skb->data; |
1da177e4 | 1943 | int skb_len = skb->len; |
dbcb5855 | 1944 | unsigned int snaplen, res; |
1da177e4 LT |
1945 | |
1946 | if (skb->pkt_type == PACKET_LOOPBACK) | |
1947 | goto drop; | |
1948 | ||
1949 | sk = pt->af_packet_priv; | |
1950 | po = pkt_sk(sk); | |
1951 | ||
09ad9bc7 | 1952 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1953 | goto drop; |
1954 | ||
1da177e4 LT |
1955 | skb->dev = dev; |
1956 | ||
3b04ddde | 1957 | if (dev->header_ops) { |
1da177e4 | 1958 | /* The device has an explicit notion of ll header, |
62ab0812 ED |
1959 | * exported to higher levels. |
1960 | * | |
1961 | * Otherwise, the device hides details of its frame | |
1962 | * structure, so that the corresponding packet header is | |
1963 | * never delivered to the user. | |
1da177e4 LT |
1964 | */ |
1965 | if (sk->sk_type != SOCK_DGRAM) | |
98e399f8 | 1966 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1967 | else if (skb->pkt_type == PACKET_OUTGOING) { |
1968 | /* Special case: outgoing packets have ll header at head */ | |
bbe735e4 | 1969 | skb_pull(skb, skb_network_offset(skb)); |
1da177e4 LT |
1970 | } |
1971 | } | |
1972 | ||
1973 | snaplen = skb->len; | |
1974 | ||
dbcb5855 DM |
1975 | res = run_filter(skb, sk, snaplen); |
1976 | if (!res) | |
fda9ef5d | 1977 | goto drop_n_restore; |
dbcb5855 DM |
1978 | if (snaplen > res) |
1979 | snaplen = res; | |
1da177e4 | 1980 | |
0fd7bac6 | 1981 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) |
1da177e4 LT |
1982 | goto drop_n_acct; |
1983 | ||
1984 | if (skb_shared(skb)) { | |
1985 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | |
1986 | if (nskb == NULL) | |
1987 | goto drop_n_acct; | |
1988 | ||
1989 | if (skb_head != skb->data) { | |
1990 | skb->data = skb_head; | |
1991 | skb->len = skb_len; | |
1992 | } | |
abc4e4fa | 1993 | consume_skb(skb); |
1da177e4 LT |
1994 | skb = nskb; |
1995 | } | |
1996 | ||
b4772ef8 | 1997 | sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); |
ffbc6111 HX |
1998 | |
1999 | sll = &PACKET_SKB_CB(skb)->sa.ll; | |
1da177e4 | 2000 | sll->sll_hatype = dev->type; |
1da177e4 | 2001 | sll->sll_pkttype = skb->pkt_type; |
8032b464 | 2002 | if (unlikely(po->origdev)) |
80feaacb PWJ |
2003 | sll->sll_ifindex = orig_dev->ifindex; |
2004 | else | |
2005 | sll->sll_ifindex = dev->ifindex; | |
1da177e4 | 2006 | |
b95cce35 | 2007 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
1da177e4 | 2008 | |
2472d761 EB |
2009 | /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). |
2010 | * Use their space for storing the original skb length. | |
2011 | */ | |
2012 | PACKET_SKB_CB(skb)->sa.origlen = skb->len; | |
8dc41944 | 2013 | |
1da177e4 LT |
2014 | if (pskb_trim(skb, snaplen)) |
2015 | goto drop_n_acct; | |
2016 | ||
2017 | skb_set_owner_r(skb, sk); | |
2018 | skb->dev = NULL; | |
adf30907 | 2019 | skb_dst_drop(skb); |
1da177e4 | 2020 | |
84531c24 PO |
2021 | /* drop conntrack reference */ |
2022 | nf_reset(skb); | |
2023 | ||
1da177e4 | 2024 | spin_lock(&sk->sk_receive_queue.lock); |
ee80fbf3 | 2025 | po->stats.stats1.tp_packets++; |
3bc3b96f | 2026 | sock_skb_set_dropcount(sk, skb); |
1da177e4 LT |
2027 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
2028 | spin_unlock(&sk->sk_receive_queue.lock); | |
676d2369 | 2029 | sk->sk_data_ready(sk); |
1da177e4 LT |
2030 | return 0; |
2031 | ||
2032 | drop_n_acct: | |
7091fbd8 | 2033 | spin_lock(&sk->sk_receive_queue.lock); |
ee80fbf3 | 2034 | po->stats.stats1.tp_drops++; |
7091fbd8 WB |
2035 | atomic_inc(&sk->sk_drops); |
2036 | spin_unlock(&sk->sk_receive_queue.lock); | |
1da177e4 LT |
2037 | |
2038 | drop_n_restore: | |
2039 | if (skb_head != skb->data && skb_shared(skb)) { | |
2040 | skb->data = skb_head; | |
2041 | skb->len = skb_len; | |
2042 | } | |
2043 | drop: | |
ead2ceb0 | 2044 | consume_skb(skb); |
1da177e4 LT |
2045 | return 0; |
2046 | } | |
2047 | ||
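/*
 * Illustrative sketch: the sockaddr_ll that packet_rcv() above stashes in
 * the skb's control block is what recvfrom() later hands back, so a
 * reader can recover the ingress ifindex and packet type for each frame.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void read_one(int fd)
{
	unsigned char buf[2048];
	struct sockaddr_ll sll;
	socklen_t alen = sizeof(sll);
	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
			     (struct sockaddr *)&sll, &alen);

	if (n >= 0)
		printf("ifindex=%d pkttype=%u len=%zd\n",
		       sll.sll_ifindex, sll.sll_pkttype, n);
}
#endif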
40d4e3df ED |
2048 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
2049 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
2050 | { |
2051 | struct sock *sk; | |
2052 | struct packet_sock *po; | |
2053 | struct sockaddr_ll *sll; | |
184f489e | 2054 | union tpacket_uhdr h; |
40d4e3df | 2055 | u8 *skb_head = skb->data; |
1da177e4 | 2056 | int skb_len = skb->len; |
dbcb5855 | 2057 | unsigned int snaplen, res; |
f6fb8f10 | 2058 | unsigned long status = TP_STATUS_USER; |
bbd6ef87 | 2059 | unsigned short macoff, netoff, hdrlen; |
1da177e4 | 2060 | struct sk_buff *copy_skb = NULL; |
bbd6ef87 | 2061 | struct timespec ts; |
b9c32fb2 | 2062 | __u32 ts_status; |
1da177e4 | 2063 | |
51846355 AW |
2064 | /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. |
2065 | * We may add members to them up to the current aligned size without forcing | |
2066 | * userspace to call getsockopt(..., PACKET_HDRLEN, ...). | |
2067 | */ | |
2068 | BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); | |
2069 | BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); | |
2070 | ||
1da177e4 LT |
2071 | if (skb->pkt_type == PACKET_LOOPBACK) |
2072 | goto drop; | |
2073 | ||
2074 | sk = pt->af_packet_priv; | |
2075 | po = pkt_sk(sk); | |
2076 | ||
09ad9bc7 | 2077 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
2078 | goto drop; |
2079 | ||
3b04ddde | 2080 | if (dev->header_ops) { |
1da177e4 | 2081 | if (sk->sk_type != SOCK_DGRAM) |
98e399f8 | 2082 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
2083 | else if (skb->pkt_type == PACKET_OUTGOING) { |
2084 | /* Special case: outgoing packets have ll header at head */ | |
bbe735e4 | 2085 | skb_pull(skb, skb_network_offset(skb)); |
1da177e4 LT |
2086 | } |
2087 | } | |
2088 | ||
2089 | snaplen = skb->len; | |
2090 | ||
dbcb5855 DM |
2091 | res = run_filter(skb, sk, snaplen); |
2092 | if (!res) | |
fda9ef5d | 2093 | goto drop_n_restore; |
68c2e5de AD |
2094 | |
2095 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
2096 | status |= TP_STATUS_CSUMNOTREADY; | |
682f048b AD |
2097 | else if (skb->pkt_type != PACKET_OUTGOING && |
2098 | (skb->ip_summed == CHECKSUM_COMPLETE || | |
2099 | skb_csum_unnecessary(skb))) | |
2100 | status |= TP_STATUS_CSUM_VALID; | |
68c2e5de | 2101 | |
dbcb5855 DM |
2102 | if (snaplen > res) |
2103 | snaplen = res; | |
1da177e4 LT |
2104 | |
2105 | if (sk->sk_type == SOCK_DGRAM) { | |
8913336a PM |
2106 | macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + |
2107 | po->tp_reserve; | |
1da177e4 | 2108 | } else { |
95c96174 | 2109 | unsigned int maclen = skb_network_offset(skb); |
bbd6ef87 | 2110 | netoff = TPACKET_ALIGN(po->tp_hdrlen + |
8913336a PM |
2111 | (maclen < 16 ? 16 : maclen)) + |
2112 | po->tp_reserve; | |
1da177e4 LT |
2113 | macoff = netoff - maclen; |
2114 | } | |
f6fb8f10 | 2115 | if (po->tp_version <= TPACKET_V2) { |
2116 | if (macoff + snaplen > po->rx_ring.frame_size) { | |
2117 | if (po->copy_thresh && | |
0fd7bac6 | 2118 | atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { |
f6fb8f10 | 2119 | if (skb_shared(skb)) { |
2120 | copy_skb = skb_clone(skb, GFP_ATOMIC); | |
2121 | } else { | |
2122 | copy_skb = skb_get(skb); | |
2123 | skb_head = skb->data; | |
2124 | } | |
2125 | if (copy_skb) | |
2126 | skb_set_owner_r(copy_skb, sk); | |
1da177e4 | 2127 | } |
f6fb8f10 | 2128 | snaplen = po->rx_ring.frame_size - macoff; |
2129 | if ((int)snaplen < 0) | |
2130 | snaplen = 0; | |
1da177e4 | 2131 | } |
dc808110 ED |
2132 | } else if (unlikely(macoff + snaplen > |
2133 | GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { | |
2134 | u32 nval; | |
2135 | ||
2136 | nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; | |
2137 | pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", | |
2138 | snaplen, nval, macoff); | |
2139 | snaplen = nval; | |
2140 | if (unlikely((int)snaplen < 0)) { | |
2141 | snaplen = 0; | |
2142 | macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; | |
2143 | } | |
1da177e4 | 2144 | } |
1da177e4 | 2145 | spin_lock(&sk->sk_receive_queue.lock); |
f6fb8f10 | 2146 | h.raw = packet_current_rx_frame(po, skb, |
2147 | TP_STATUS_KERNEL, (macoff+snaplen)); | |
bbd6ef87 | 2148 | if (!h.raw) |
1da177e4 | 2149 | goto ring_is_full; |
f6fb8f10 | 2150 | if (po->tp_version <= TPACKET_V2) { |
2151 | packet_increment_rx_head(po, &po->rx_ring); | |
2152 | /* | |
2153 | * LOSING will be reported until you read the stats, | |
2154 | * because it's COR - Clear On Read. | |
2155 | * Anyway, this is done for V1/V2 only, as V3 doesn't need it | |
2156 | * at the packet level. | |
2157 | */ | |
ee80fbf3 | 2158 | if (po->stats.stats1.tp_drops) |
f6fb8f10 | 2159 | status |= TP_STATUS_LOSING; |
2160 | } | |
ee80fbf3 | 2161 | po->stats.stats1.tp_packets++; |
1da177e4 LT |
2162 | if (copy_skb) { |
2163 | status |= TP_STATUS_COPY; | |
2164 | __skb_queue_tail(&sk->sk_receive_queue, copy_skb); | |
2165 | } | |
1da177e4 LT |
2166 | spin_unlock(&sk->sk_receive_queue.lock); |
2167 | ||
bbd6ef87 | 2168 | skb_copy_bits(skb, 0, h.raw + macoff, snaplen); |
b9c32fb2 DB |
2169 | |
2170 | if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) | |
7a51384c | 2171 | getnstimeofday(&ts); |
1da177e4 | 2172 | |
b9c32fb2 DB |
2173 | status |= ts_status; |
2174 | ||
bbd6ef87 PM |
2175 | switch (po->tp_version) { |
2176 | case TPACKET_V1: | |
2177 | h.h1->tp_len = skb->len; | |
2178 | h.h1->tp_snaplen = snaplen; | |
2179 | h.h1->tp_mac = macoff; | |
2180 | h.h1->tp_net = netoff; | |
4b457bdf DB |
2181 | h.h1->tp_sec = ts.tv_sec; |
2182 | h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; | |
bbd6ef87 PM |
2183 | hdrlen = sizeof(*h.h1); |
2184 | break; | |
2185 | case TPACKET_V2: | |
2186 | h.h2->tp_len = skb->len; | |
2187 | h.h2->tp_snaplen = snaplen; | |
2188 | h.h2->tp_mac = macoff; | |
2189 | h.h2->tp_net = netoff; | |
bbd6ef87 PM |
2190 | h.h2->tp_sec = ts.tv_sec; |
2191 | h.h2->tp_nsec = ts.tv_nsec; | |
df8a39de JP |
2192 | if (skb_vlan_tag_present(skb)) { |
2193 | h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); | |
a0cdfcf3 AW |
2194 | h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); |
2195 | status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | |
a3bcc23e BG |
2196 | } else { |
2197 | h.h2->tp_vlan_tci = 0; | |
a0cdfcf3 | 2198 | h.h2->tp_vlan_tpid = 0; |
a3bcc23e | 2199 | } |
e4d26f4b | 2200 | memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); |
bbd6ef87 PM |
2201 | hdrlen = sizeof(*h.h2); |
2202 | break; | |
f6fb8f10 | 2203 | case TPACKET_V3: |
2204 | /* tp_nxt_offset and vlan are already populated above, | |
2205 | * so DON'T clear those fields here. | |
2206 | */ | |
2207 | h.h3->tp_status |= status; | |
2208 | h.h3->tp_len = skb->len; | |
2209 | h.h3->tp_snaplen = snaplen; | |
2210 | h.h3->tp_mac = macoff; | |
2211 | h.h3->tp_net = netoff; | |
f6fb8f10 | 2212 | h.h3->tp_sec = ts.tv_sec; |
2213 | h.h3->tp_nsec = ts.tv_nsec; | |
e4d26f4b | 2214 | memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); |
f6fb8f10 | 2215 | hdrlen = sizeof(*h.h3); |
2216 | break; | |
bbd6ef87 PM |
2217 | default: |
2218 | BUG(); | |
2219 | } | |
1da177e4 | 2220 | |
bbd6ef87 | 2221 | sll = h.raw + TPACKET_ALIGN(hdrlen); |
b95cce35 | 2222 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
1da177e4 LT |
2223 | sll->sll_family = AF_PACKET; |
2224 | sll->sll_hatype = dev->type; | |
2225 | sll->sll_protocol = skb->protocol; | |
2226 | sll->sll_pkttype = skb->pkt_type; | |
8032b464 | 2227 | if (unlikely(po->origdev)) |
80feaacb PWJ |
2228 | sll->sll_ifindex = orig_dev->ifindex; |
2229 | else | |
2230 | sll->sll_ifindex = dev->ifindex; | |
1da177e4 | 2231 | |
e16aa207 | 2232 | smp_mb(); |
f0d4eb29 | 2233 | |
f6dafa95 | 2234 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 |
f0d4eb29 | 2235 | if (po->tp_version <= TPACKET_V2) { |
0af55bb5 CG |
2236 | u8 *start, *end; |
2237 | ||
f0d4eb29 DB |
2238 | end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + |
2239 | macoff + snaplen); | |
2240 | ||
2241 | for (start = h.raw; start < end; start += PAGE_SIZE) | |
2242 | flush_dcache_page(pgv_to_page(start)); | |
1da177e4 | 2243 | } |
f0d4eb29 | 2244 | smp_wmb(); |
f6dafa95 | 2245 | #endif |
f0d4eb29 | 2246 | |
da413eec | 2247 | if (po->tp_version <= TPACKET_V2) { |
f6fb8f10 | 2248 | __packet_set_status(po, h.raw, status); |
da413eec DC |
2249 | sk->sk_data_ready(sk); |
2250 | } else { | |
f6fb8f10 | 2251 | prb_clear_blk_fill_status(&po->rx_ring); |
da413eec | 2252 | } |
1da177e4 LT |
2253 | |
2254 | drop_n_restore: | |
2255 | if (skb_head != skb->data && skb_shared(skb)) { | |
2256 | skb->data = skb_head; | |
2257 | skb->len = skb_len; | |
2258 | } | |
2259 | drop: | |
1ce4f28b | 2260 | kfree_skb(skb); |
1da177e4 LT |
2261 | return 0; |
2262 | ||
2263 | ring_is_full: | |
ee80fbf3 | 2264 | po->stats.stats1.tp_drops++; |
1da177e4 LT |
2265 | spin_unlock(&sk->sk_receive_queue.lock); |
2266 | ||
676d2369 | 2267 | sk->sk_data_ready(sk); |
acb5d75b | 2268 | kfree_skb(copy_skb); |
1da177e4 LT |
2269 | goto drop_n_restore; |
2270 | } | |
2271 | ||
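/*
 * Illustrative userspace sketch (TPACKET_V2 assumed, setup elided): the
 * consumer half of the handshake in tpacket_rcv() above. Userspace walks
 * the mmap()ed ring, takes frames whose tp_status carries TP_STATUS_USER,
 * and returns each slot by writing TP_STATUS_KERNEL. Real code would
 * poll() instead of spinning.
 */
#if 0
#include <linux/if_packet.h>

static void rx_ring_loop(void *ring, unsigned int frame_nr,
			 unsigned int frame_sz)
{
	unsigned int i = 0;

	for (;;) {
		struct tpacket2_hdr *hdr = ring + i * frame_sz;

		if (!(hdr->tp_status & TP_STATUS_USER))
			continue;	/* poll(fd, POLLIN) belongs here */

		/* frame data starts at (char *)hdr + hdr->tp_mac,
		 * hdr->tp_snaplen bytes of it */
		hdr->tp_status = TP_STATUS_KERNEL;	/* hand the slot back */
		i = (i + 1) % frame_nr;
	}
}
#endif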
69e3c75f JB |
2272 | static void tpacket_destruct_skb(struct sk_buff *skb) |
2273 | { | |
2274 | struct packet_sock *po = pkt_sk(skb->sk); | |
1da177e4 | 2275 | |
69e3c75f | 2276 | if (likely(po->tx_ring.pg_vec)) { |
f0d4eb29 | 2277 | void *ph; |
b9c32fb2 DB |
2278 | __u32 ts; |
2279 | ||
69e3c75f | 2280 | ph = skb_shinfo(skb)->destructor_arg; |
b0138408 | 2281 | packet_dec_pending(&po->tx_ring); |
b9c32fb2 DB |
2282 | |
2283 | ts = __packet_set_timestamp(po, ph, skb); | |
2284 | __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); | |
69e3c75f JB |
2285 | } |
2286 | ||
2287 | sock_wfree(skb); | |
2288 | } | |
2289 | ||
9c707762 WB |
2290 | static bool ll_header_truncated(const struct net_device *dev, int len) |
2291 | { | |
2292 | /* net device doesn't like empty head */ | |
2293 | if (unlikely(len <= dev->hard_header_len)) { | |
eee2f04b | 2294 | net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n", |
9c707762 WB |
2295 | current->comm, len, dev->hard_header_len); |
2296 | return true; | |
2297 | } | |
2298 | ||
2299 | return false; | |
2300 | } | |
2301 | ||
40d4e3df ED |
2302 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, |
2303 | void *frame, struct net_device *dev, int size_max, | |
ae641949 | 2304 | __be16 proto, unsigned char *addr, int hlen) |
69e3c75f | 2305 | { |
184f489e | 2306 | union tpacket_uhdr ph; |
09effa67 | 2307 | int to_write, offset, len, tp_len, nr_frags, len_max; |
69e3c75f JB |
2308 | struct socket *sock = po->sk.sk_socket; |
2309 | struct page *page; | |
2310 | void *data; | |
2311 | int err; | |
2312 | ||
2313 | ph.raw = frame; | |
2314 | ||
2315 | skb->protocol = proto; | |
2316 | skb->dev = dev; | |
2317 | skb->priority = po->sk.sk_priority; | |
2d37a186 | 2318 | skb->mark = po->sk.sk_mark; |
2e31396f | 2319 | sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags); |
69e3c75f JB |
2320 | skb_shinfo(skb)->destructor_arg = ph.raw; |
2321 | ||
2322 | switch (po->tp_version) { | |
2323 | case TPACKET_V2: | |
2324 | tp_len = ph.h2->tp_len; | |
2325 | break; | |
2326 | default: | |
2327 | tp_len = ph.h1->tp_len; | |
2328 | break; | |
2329 | } | |
09effa67 DM |
2330 | if (unlikely(tp_len > size_max)) { |
2331 | pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); | |
2332 | return -EMSGSIZE; | |
2333 | } | |
69e3c75f | 2334 | |
ae641949 | 2335 | skb_reserve(skb, hlen); |
69e3c75f | 2336 | skb_reset_network_header(skb); |
c1aad275 | 2337 | |
d346a3fa DB |
2338 | if (!packet_use_direct_xmit(po)) |
2339 | skb_probe_transport_header(skb, 0); | |
2340 | if (unlikely(po->tp_tx_has_off)) { | |
5920cd3a PC |
2341 | int off_min, off_max, off; |
2342 | off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); | |
2343 | off_max = po->tx_ring.frame_size - tp_len; | |
2344 | if (sock->type == SOCK_DGRAM) { | |
2345 | switch (po->tp_version) { | |
2346 | case TPACKET_V2: | |
2347 | off = ph.h2->tp_net; | |
2348 | break; | |
2349 | default: | |
2350 | off = ph.h1->tp_net; | |
2351 | break; | |
2352 | } | |
2353 | } else { | |
2354 | switch (po->tp_version) { | |
2355 | case TPACKET_V2: | |
2356 | off = ph.h2->tp_mac; | |
2357 | break; | |
2358 | default: | |
2359 | off = ph.h1->tp_mac; | |
2360 | break; | |
2361 | } | |
2362 | } | |
2363 | if (unlikely((off < off_min) || (off_max < off))) | |
2364 | return -EINVAL; | |
2365 | data = ph.raw + off; | |
2366 | } else { | |
2367 | data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); | |
2368 | } | |
69e3c75f JB |
2369 | to_write = tp_len; |
2370 | ||
2371 | if (sock->type == SOCK_DGRAM) { | |
2372 | err = dev_hard_header(skb, dev, ntohs(proto), addr, | |
2373 | NULL, tp_len); | |
2374 | if (unlikely(err < 0)) | |
2375 | return -EINVAL; | |
40d4e3df | 2376 | } else if (dev->hard_header_len) { |
9c707762 | 2377 | if (ll_header_truncated(dev, tp_len)) |
69e3c75f | 2378 | return -EINVAL; |
69e3c75f JB |
2379 | |
2380 | skb_push(skb, dev->hard_header_len); | |
2381 | err = skb_store_bits(skb, 0, data, | |
2382 | dev->hard_header_len); | |
2383 | if (unlikely(err)) | |
2384 | return err; | |
2385 | ||
2386 | data += dev->hard_header_len; | |
2387 | to_write -= dev->hard_header_len; | |
2388 | } | |
2389 | ||
69e3c75f JB |
2390 | offset = offset_in_page(data); |
2391 | len_max = PAGE_SIZE - offset; | |
2392 | len = ((to_write > len_max) ? len_max : to_write); | |
2393 | ||
2394 | skb->data_len = to_write; | |
2395 | skb->len += to_write; | |
2396 | skb->truesize += to_write; | |
2397 | atomic_add(to_write, &po->sk.sk_wmem_alloc); | |
2398 | ||
2399 | while (likely(to_write)) { | |
2400 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2401 | ||
2402 | if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { | |
40d4e3df ED |
2403 | pr_err("Packet exceeds the number of skb frags (%lu)\n", | |
2404 | MAX_SKB_FRAGS); | |
69e3c75f JB |
2405 | return -EFAULT; |
2406 | } | |
2407 | ||
0af55bb5 CG |
2408 | page = pgv_to_page(data); |
2409 | data += len; | |
69e3c75f JB |
2410 | flush_dcache_page(page); |
2411 | get_page(page); | |
0af55bb5 | 2412 | skb_fill_page_desc(skb, nr_frags, page, offset, len); |
69e3c75f JB |
2413 | to_write -= len; |
2414 | offset = 0; | |
2415 | len_max = PAGE_SIZE; | |
2416 | len = ((to_write > len_max) ? len_max : to_write); | |
2417 | } | |
2418 | ||
2419 | return tp_len; | |
2420 | } | |
2421 | ||
2422 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |
2423 | { | |
69e3c75f JB |
2424 | struct sk_buff *skb; |
2425 | struct net_device *dev; | |
2426 | __be16 proto; | |
09effa67 | 2427 | int err, reserve = 0; |
40d4e3df | 2428 | void *ph; |
342dfc30 | 2429 | DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); |
87a2fd28 | 2430 | bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); |
69e3c75f JB |
2431 | int tp_len, size_max; |
2432 | unsigned char *addr; | |
2433 | int len_sum = 0; | |
9e67030a | 2434 | int status = TP_STATUS_AVAILABLE; |
ae641949 | 2435 | int hlen, tlen; |
69e3c75f | 2436 | |
69e3c75f JB |
2437 | mutex_lock(&po->pg_vec_lock); |
2438 | ||
66e56cd4 | 2439 | if (likely(saddr == NULL)) { |
e40526cb | 2440 | dev = packet_cached_dev_get(po); |
69e3c75f JB |
2441 | proto = po->num; |
2442 | addr = NULL; | |
2443 | } else { | |
2444 | err = -EINVAL; | |
2445 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | |
2446 | goto out; | |
2447 | if (msg->msg_namelen < (saddr->sll_halen | |
2448 | + offsetof(struct sockaddr_ll, | |
2449 | sll_addr))) | |
2450 | goto out; | |
69e3c75f JB |
2451 | proto = saddr->sll_protocol; |
2452 | addr = saddr->sll_addr; | |
827d9780 | 2453 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
69e3c75f JB |
2454 | } |
2455 | ||
69e3c75f JB |
2456 | err = -ENXIO; |
2457 | if (unlikely(dev == NULL)) | |
2458 | goto out; | |
69e3c75f JB |
2459 | err = -ENETDOWN; |
2460 | if (unlikely(!(dev->flags & IFF_UP))) | |
2461 | goto out_put; | |
2462 | ||
52f1454f | 2463 | reserve = dev->hard_header_len + VLAN_HLEN; |
69e3c75f | 2464 | size_max = po->tx_ring.frame_size |
b5dd884e | 2465 | - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); |
69e3c75f | 2466 | |
09effa67 DM |
2467 | if (size_max > dev->mtu + reserve) |
2468 | size_max = dev->mtu + reserve; | |
2469 | ||
69e3c75f JB |
2470 | do { |
2471 | ph = packet_current_frame(po, &po->tx_ring, | |
87a2fd28 | 2472 | TP_STATUS_SEND_REQUEST); |
69e3c75f | 2473 | if (unlikely(ph == NULL)) { |
87a2fd28 DB |
2474 | if (need_wait && need_resched()) |
2475 | schedule(); | |
69e3c75f JB |
2476 | continue; |
2477 | } | |
2478 | ||
2479 | status = TP_STATUS_SEND_REQUEST; | |
ae641949 HX |
2480 | hlen = LL_RESERVED_SPACE(dev); |
2481 | tlen = dev->needed_tailroom; | |
69e3c75f | 2482 | skb = sock_alloc_send_skb(&po->sk, |
ae641949 | 2483 | hlen + tlen + sizeof(struct sockaddr_ll), |
fbf33a28 | 2484 | !need_wait, &err); |
69e3c75f | 2485 | |
fbf33a28 KM |
2486 | if (unlikely(skb == NULL)) { |
2487 | /* we assume the socket was initially writable ... */ | |
2488 | if (likely(len_sum > 0)) | |
2489 | err = len_sum; | |
69e3c75f | 2490 | goto out_status; |
fbf33a28 | 2491 | } |
69e3c75f | 2492 | tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, |
52f1454f | 2493 | addr, hlen); |
dbd46ab4 AD |
2494 | if (likely(tp_len >= 0) && |
2495 | tp_len > dev->mtu + dev->hard_header_len) { | |
52f1454f DB |
2496 | struct ethhdr *ehdr; |
2497 | /* Earlier code assumed this would be a VLAN pkt, | |
2498 | * double-check this now that we have the actual | |
2499 | * packet in hand. | |
2500 | */ | |
69e3c75f | 2501 | |
52f1454f DB |
2502 | skb_reset_mac_header(skb); |
2503 | ehdr = eth_hdr(skb); | |
2504 | if (ehdr->h_proto != htons(ETH_P_8021Q)) | |
2505 | tp_len = -EMSGSIZE; | |
2506 | } | |
69e3c75f JB |
2507 | if (unlikely(tp_len < 0)) { |
2508 | if (po->tp_loss) { | |
2509 | __packet_set_status(po, ph, | |
2510 | TP_STATUS_AVAILABLE); | |
2511 | packet_increment_head(&po->tx_ring); | |
2512 | kfree_skb(skb); | |
2513 | continue; | |
2514 | } else { | |
2515 | status = TP_STATUS_WRONG_FORMAT; | |
2516 | err = tp_len; | |
2517 | goto out_status; | |
2518 | } | |
2519 | } | |
2520 | ||
0fd5d57b DB |
2521 | packet_pick_tx_queue(dev, skb); |
2522 | ||
69e3c75f JB |
2523 | skb->destructor = tpacket_destruct_skb; |
2524 | __packet_set_status(po, ph, TP_STATUS_SENDING); | |
b0138408 | 2525 | packet_inc_pending(&po->tx_ring); |
69e3c75f JB |
2526 | |
2527 | status = TP_STATUS_SEND_REQUEST; | |
d346a3fa | 2528 | err = po->xmit(skb); |
eb70df13 JP |
2529 | if (unlikely(err > 0)) { |
2530 | err = net_xmit_errno(err); | |
2531 | if (err && __packet_get_status(po, ph) == | |
2532 | TP_STATUS_AVAILABLE) { | |
2533 | /* skb was destructed already */ | |
2534 | skb = NULL; | |
2535 | goto out_status; | |
2536 | } | |
2537 | /* | |
2538 | * skb was dropped but not destructed yet; | |
2539 | * let's treat it like congestion or err < 0 | |
2540 | */ | |
2541 | err = 0; | |
2542 | } | |
69e3c75f JB |
2543 | packet_increment_head(&po->tx_ring); |
2544 | len_sum += tp_len; | |
b0138408 DB |
2545 | } while (likely((ph != NULL) || |
2546 | /* Note: packet_read_pending() might be slow if we have | |
2547 | * to call it, as it's a per_cpu variable, but in the fast path | |
2548 | * we already short-circuit the loop with the first | |
2549 | * condition, and luckily don't have to go that path | |
2550 | * anyway. | |
2551 | */ | |
2552 | (need_wait && packet_read_pending(&po->tx_ring)))); | |
69e3c75f JB |
2553 | |
2554 | err = len_sum; | |
2555 | goto out_put; | |
2556 | ||
69e3c75f JB |
2557 | out_status: |
2558 | __packet_set_status(po, ph, status); | |
2559 | kfree_skb(skb); | |
2560 | out_put: | |
e40526cb | 2561 | dev_put(dev); |
69e3c75f JB |
2562 | out: |
2563 | mutex_unlock(&po->pg_vec_lock); | |
2564 | return err; | |
2565 | } | |
69e3c75f | 2566 | |
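/*
 * Illustrative userspace sketch (TPACKET_V2 assumed, ring setup elided):
 * the producer side of tpacket_snd(). Userspace writes the frame, sets
 * tp_len, flips the slot to TP_STATUS_SEND_REQUEST and kicks the kernel
 * with send(); tpacket_destruct_skb() flips it back to
 * TP_STATUS_AVAILABLE once the skb is freed.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void tx_one_frame(int fd, void *slot, const void *pkt, int len)
{
	struct tpacket2_hdr *hdr = slot;
	void *data = slot + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);

	memcpy(data, pkt, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	send(fd, NULL, 0, 0);	/* flush all pending slots */
}
#endif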
eea49cc9 OJ |
2567 | static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, |
2568 | size_t reserve, size_t len, | |
2569 | size_t linear, int noblock, | |
2570 | int *err) | |
bfd5f4a3 SS |
2571 | { |
2572 | struct sk_buff *skb; | |
2573 | ||
2574 | /* Under a page? Don't bother with paged skb. */ | |
2575 | if (prepad + len < PAGE_SIZE || !linear) | |
2576 | linear = len; | |
2577 | ||
2578 | skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, | |
28d64271 | 2579 | err, 0); |
bfd5f4a3 SS |
2580 | if (!skb) |
2581 | return NULL; | |
2582 | ||
2583 | skb_reserve(skb, reserve); | |
2584 | skb_put(skb, linear); | |
2585 | skb->data_len = len - linear; | |
2586 | skb->len += len - linear; | |
2587 | ||
2588 | return skb; | |
2589 | } | |
2590 | ||
d346a3fa | 2591 | static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
1da177e4 LT |
2592 | { |
2593 | struct sock *sk = sock->sk; | |
342dfc30 | 2594 | DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); |
1da177e4 LT |
2595 | struct sk_buff *skb; |
2596 | struct net_device *dev; | |
0e11c91e | 2597 | __be16 proto; |
1da177e4 | 2598 | unsigned char *addr; |
827d9780 | 2599 | int err, reserve = 0; |
bfd5f4a3 SS |
2600 | struct virtio_net_hdr vnet_hdr = { 0 }; |
2601 | int offset = 0; | |
2602 | int vnet_hdr_len; | |
2603 | struct packet_sock *po = pkt_sk(sk); | |
2604 | unsigned short gso_type = 0; | |
ae641949 | 2605 | int hlen, tlen; |
3bdc0eba | 2606 | int extra_len = 0; |
8feb2fb2 | 2607 | ssize_t n; |
1da177e4 LT |
2608 | |
2609 | /* | |
1ce4f28b | 2610 | * Get and verify the address. |
1da177e4 | 2611 | */ |
1ce4f28b | 2612 | |
66e56cd4 | 2613 | if (likely(saddr == NULL)) { |
e40526cb | 2614 | dev = packet_cached_dev_get(po); |
1da177e4 LT |
2615 | proto = po->num; |
2616 | addr = NULL; | |
2617 | } else { | |
2618 | err = -EINVAL; | |
2619 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | |
2620 | goto out; | |
0fb375fb EB |
2621 | if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) |
2622 | goto out; | |
1da177e4 LT |
2623 | proto = saddr->sll_protocol; |
2624 | addr = saddr->sll_addr; | |
827d9780 | 2625 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); |
1da177e4 LT |
2626 | } |
2627 | ||
1da177e4 | 2628 | err = -ENXIO; |
e40526cb | 2629 | if (unlikely(dev == NULL)) |
1da177e4 | 2630 | goto out_unlock; |
d5e76b0a | 2631 | err = -ENETDOWN; |
e40526cb | 2632 | if (unlikely(!(dev->flags & IFF_UP))) |
d5e76b0a DM |
2633 | goto out_unlock; |
2634 | ||
e40526cb DB |
2635 | if (sock->type == SOCK_RAW) |
2636 | reserve = dev->hard_header_len; | |
bfd5f4a3 SS |
2637 | if (po->has_vnet_hdr) { |
2638 | vnet_hdr_len = sizeof(vnet_hdr); | |
2639 | ||
2640 | err = -EINVAL; | |
2641 | if (len < vnet_hdr_len) | |
2642 | goto out_unlock; | |
2643 | ||
2644 | len -= vnet_hdr_len; | |
2645 | ||
8feb2fb2 | 2646 | err = -EFAULT; |
c0371da6 | 2647 | n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter); |
8feb2fb2 | 2648 | if (n != vnet_hdr_len) |
bfd5f4a3 SS |
2649 | goto out_unlock; |
2650 | ||
2651 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | |
dc9e5153 MT |
2652 | (__virtio16_to_cpu(false, vnet_hdr.csum_start) + |
2653 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > | |
2654 | __virtio16_to_cpu(false, vnet_hdr.hdr_len))) | |
2655 | vnet_hdr.hdr_len = __cpu_to_virtio16(false, | |
2656 | __virtio16_to_cpu(false, vnet_hdr.csum_start) + | |
2657 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); | |
bfd5f4a3 SS |
2658 | |
2659 | err = -EINVAL; | |
dc9e5153 | 2660 | if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) |
bfd5f4a3 SS |
2661 | goto out_unlock; |
2662 | ||
2663 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | |
2664 | switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { | |
2665 | case VIRTIO_NET_HDR_GSO_TCPV4: | |
2666 | gso_type = SKB_GSO_TCPV4; | |
2667 | break; | |
2668 | case VIRTIO_NET_HDR_GSO_TCPV6: | |
2669 | gso_type = SKB_GSO_TCPV6; | |
2670 | break; | |
2671 | case VIRTIO_NET_HDR_GSO_UDP: | |
2672 | gso_type = SKB_GSO_UDP; | |
2673 | break; | |
2674 | default: | |
2675 | goto out_unlock; | |
2676 | } | |
2677 | ||
2678 | if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) | |
2679 | gso_type |= SKB_GSO_TCP_ECN; | |
2680 | ||
2681 | if (vnet_hdr.gso_size == 0) | |
2682 | goto out_unlock; | |
2683 | ||
2684 | } | |
2685 | } | |
2686 | ||
3bdc0eba BG |
2687 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
2688 | if (!netif_supports_nofcs(dev)) { | |
2689 | err = -EPROTONOSUPPORT; | |
2690 | goto out_unlock; | |
2691 | } | |
2692 | extra_len = 4; /* We're doing our own CRC */ | |
2693 | } | |
2694 | ||
1da177e4 | 2695 | err = -EMSGSIZE; |
3bdc0eba | 2696 | if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) |
1da177e4 LT |
2697 | goto out_unlock; |
2698 | ||
bfd5f4a3 | 2699 | err = -ENOBUFS; |
ae641949 HX |
2700 | hlen = LL_RESERVED_SPACE(dev); |
2701 | tlen = dev->needed_tailroom; | |
dc9e5153 MT |
2702 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, |
2703 | __virtio16_to_cpu(false, vnet_hdr.hdr_len), | |
bfd5f4a3 | 2704 | msg->msg_flags & MSG_DONTWAIT, &err); |
40d4e3df | 2705 | if (skb == NULL) |
1da177e4 LT |
2706 | goto out_unlock; |
2707 | ||
bfd5f4a3 | 2708 | skb_set_network_header(skb, reserve); |
1da177e4 | 2709 | |
0c4e8581 | 2710 | err = -EINVAL; |
9c707762 WB |
2711 | if (sock->type == SOCK_DGRAM) { |
2712 | offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); | |
46d2cfb1 | 2713 | if (unlikely(offset < 0)) |
9c707762 WB |
2714 | goto out_free; |
2715 | } else { | |
2716 | if (ll_header_truncated(dev, len)) | |
2717 | goto out_free; | |
2718 | } | |
1da177e4 LT |
2719 | |
2720 | /* Returns -EFAULT on error */ | |
c0371da6 | 2721 | err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); |
1da177e4 LT |
2722 | if (err) |
2723 | goto out_free; | |
bf84a010 DB |
2724 | |
2725 | sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); | |
1da177e4 | 2726 | |
3bdc0eba | 2727 | if (!gso_type && (len > dev->mtu + reserve + extra_len)) { |
09effa67 DM |
2728 | /* Earlier code assumed this would be a VLAN pkt, |
2729 | * double-check this now that we have the actual | |
2730 | * packet in hand. | |
2731 | */ | |
2732 | struct ethhdr *ehdr; | |
2733 | skb_reset_mac_header(skb); | |
2734 | ehdr = eth_hdr(skb); | |
2735 | if (ehdr->h_proto != htons(ETH_P_8021Q)) { | |
2736 | err = -EMSGSIZE; | |
2737 | goto out_free; | |
2738 | } | |
57f89bfa BG |
2739 | } |
2740 | ||
09effa67 DM |
2741 | skb->protocol = proto; |
2742 | skb->dev = dev; | |
1da177e4 | 2743 | skb->priority = sk->sk_priority; |
2d37a186 | 2744 | skb->mark = sk->sk_mark; |
0fd5d57b DB |
2745 | |
2746 | packet_pick_tx_queue(dev, skb); | |
1da177e4 | 2747 | |
bfd5f4a3 SS |
2748 | if (po->has_vnet_hdr) { |
2749 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | |
dc9e5153 MT |
2750 | u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); |
2751 | u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); | |
2752 | if (!skb_partial_csum_set(skb, s, o)) { | |
bfd5f4a3 SS |
2753 | err = -EINVAL; |
2754 | goto out_free; | |
2755 | } | |
2756 | } | |
2757 | ||
dc9e5153 MT |
2758 | skb_shinfo(skb)->gso_size = |
2759 | __virtio16_to_cpu(false, vnet_hdr.gso_size); | |
bfd5f4a3 SS |
2760 | skb_shinfo(skb)->gso_type = gso_type; |
2761 | ||
2762 | /* Header must be checked, and gso_segs computed. */ | |
2763 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | |
2764 | skb_shinfo(skb)->gso_segs = 0; | |
2765 | ||
2766 | len += vnet_hdr_len; | |
2767 | } | |
2768 | ||
d346a3fa DB |
2769 | if (!packet_use_direct_xmit(po)) |
2770 | skb_probe_transport_header(skb, reserve); | |
3bdc0eba BG |
2771 | if (unlikely(extra_len == 4)) |
2772 | skb->no_fcs = 1; | |
2773 | ||
d346a3fa | 2774 | err = po->xmit(skb); |
1da177e4 LT |
2775 | if (err > 0 && (err = net_xmit_errno(err)) != 0) |
2776 | goto out_unlock; | |
2777 | ||
e40526cb | 2778 | dev_put(dev); |
1da177e4 | 2779 | |
40d4e3df | 2780 | return len; |
1da177e4 LT |
2781 | |
2782 | out_free: | |
2783 | kfree_skb(skb); | |
2784 | out_unlock: | |
e40526cb | 2785 | if (dev) |
1da177e4 LT |
2786 | dev_put(dev); |
2787 | out: | |
2788 | return err; | |
2789 | } | |
2790 | ||
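/*
 * Illustrative sketch (names assumed): the saddr branch of packet_snd().
 * An unbound AF_PACKET socket can address each frame explicitly with a
 * sockaddr_ll carrying the ifindex, protocol and destination MAC, which
 * is what the "saddr != NULL" path above parses.
 */
#if 0
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static void send_to_dev(int fd, const void *frame, size_t len,
			const unsigned char dst[ETH_ALEN])
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_IP);		/* assumed */
	sll.sll_ifindex  = if_nametoindex("eth0");	/* assumed NIC */
	sll.sll_halen    = ETH_ALEN;
	memcpy(sll.sll_addr, dst, ETH_ALEN);

	sendto(fd, frame, len, 0, (struct sockaddr *)&sll, sizeof(sll));
}
#endif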
1b784140 | 2791 | static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) |
69e3c75f | 2792 | { |
69e3c75f JB |
2793 | struct sock *sk = sock->sk; |
2794 | struct packet_sock *po = pkt_sk(sk); | |
d346a3fa | 2795 | |
69e3c75f JB |
2796 | if (po->tx_ring.pg_vec) |
2797 | return tpacket_snd(po, msg); | |
2798 | else | |
69e3c75f JB |
2799 | return packet_snd(sock, msg, len); |
2800 | } | |
2801 | ||
1da177e4 LT |
2802 | /* |
2803 | * Close a PACKET socket. This is fairly simple. We immediately go | |
2804 | * to 'closed' state and remove our protocol entry in the device list. | |
2805 | */ | |
2806 | ||
2807 | static int packet_release(struct socket *sock) | |
2808 | { | |
2809 | struct sock *sk = sock->sk; | |
2810 | struct packet_sock *po; | |
d12d01d6 | 2811 | struct net *net; |
f6fb8f10 | 2812 | union tpacket_req_u req_u; |
1da177e4 LT |
2813 | |
2814 | if (!sk) | |
2815 | return 0; | |
2816 | ||
3b1e0a65 | 2817 | net = sock_net(sk); |
1da177e4 LT |
2818 | po = pkt_sk(sk); |
2819 | ||
0fa7fa98 | 2820 | mutex_lock(&net->packet.sklist_lock); |
808f5114 | 2821 | sk_del_node_init_rcu(sk); |
0fa7fa98 PE |
2822 | mutex_unlock(&net->packet.sklist_lock); |
2823 | ||
2824 | preempt_disable(); | |
920de804 | 2825 | sock_prot_inuse_add(net, sk->sk_prot, -1); |
0fa7fa98 | 2826 | preempt_enable(); |
1da177e4 | 2827 | |
808f5114 | 2828 | spin_lock(&po->bind_lock); |
ce06b03e | 2829 | unregister_prot_hook(sk, false); |
66e56cd4 DB |
2830 | packet_cached_dev_reset(po); |
2831 | ||
160ff18a BG |
2832 | if (po->prot_hook.dev) { |
2833 | dev_put(po->prot_hook.dev); | |
2834 | po->prot_hook.dev = NULL; | |
2835 | } | |
808f5114 | 2836 | spin_unlock(&po->bind_lock); |
1da177e4 | 2837 | |
1da177e4 | 2838 | packet_flush_mclist(sk); |
1da177e4 | 2839 | |
9665d5d6 PS |
2840 | if (po->rx_ring.pg_vec) { |
2841 | memset(&req_u, 0, sizeof(req_u)); | |
f6fb8f10 | 2842 | packet_set_ring(sk, &req_u, 1, 0); |
9665d5d6 | 2843 | } |
69e3c75f | 2844 | |
9665d5d6 PS |
2845 | if (po->tx_ring.pg_vec) { |
2846 | memset(&req_u, 0, sizeof(req_u)); | |
f6fb8f10 | 2847 | packet_set_ring(sk, &req_u, 1, 1); |
9665d5d6 | 2848 | } |
1da177e4 | 2849 | |
dc99f600 DM |
2850 | fanout_release(sk); |
2851 | ||
808f5114 | 2852 | synchronize_net(); |
1da177e4 LT |
2853 | /* |
2854 | * Now the socket is dead. No more input will appear. | |
2855 | */ | |
1da177e4 LT |
2856 | sock_orphan(sk); |
2857 | sock->sk = NULL; | |
2858 | ||
2859 | /* Purge queues */ | |
2860 | ||
2861 | skb_queue_purge(&sk->sk_receive_queue); | |
b0138408 | 2862 | packet_free_pending(po); |
17ab56a2 | 2863 | sk_refcnt_debug_release(sk); |
1da177e4 LT |
2864 | |
2865 | sock_put(sk); | |
2866 | return 0; | |
2867 | } | |
2868 | ||
2869 | /* | |
2870 | * Attach a packet hook. | |
2871 | */ | |
2872 | ||
902fefb8 | 2873 | static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) |
1da177e4 LT |
2874 | { |
2875 | struct packet_sock *po = pkt_sk(sk); | |
158cd4af | 2876 | struct net_device *dev_curr; |
902fefb8 DB |
2877 | __be16 proto_curr; |
2878 | bool need_rehook; | |
dc99f600 | 2879 | |
aef950b4 WY |
2880 | if (po->fanout) { |
2881 | if (dev) | |
2882 | dev_put(dev); | |
2883 | ||
dc99f600 | 2884 | return -EINVAL; |
aef950b4 | 2885 | } |
1da177e4 LT |
2886 | |
2887 | lock_sock(sk); | |
1da177e4 | 2888 | spin_lock(&po->bind_lock); |
66e56cd4 | 2889 | |
902fefb8 DB |
2890 | proto_curr = po->prot_hook.type; |
2891 | dev_curr = po->prot_hook.dev; | |
2892 | ||
2893 | need_rehook = proto_curr != proto || dev_curr != dev; | |
2894 | ||
2895 | if (need_rehook) { | |
2896 | unregister_prot_hook(sk, true); | |
1da177e4 | 2897 | |
902fefb8 DB |
2898 | po->num = proto; |
2899 | po->prot_hook.type = proto; | |
902fefb8 DB |
2900 | po->prot_hook.dev = dev; |
2901 | ||
2902 | po->ifindex = dev ? dev->ifindex : 0; | |
2903 | packet_cached_dev_assign(po, dev); | |
2904 | } | |
158cd4af LW |
2905 | if (dev_curr) |
2906 | dev_put(dev_curr); | |
66e56cd4 | 2907 | |
902fefb8 | 2908 | if (proto == 0 || !need_rehook) |
1da177e4 LT |
2909 | goto out_unlock; |
2910 | ||
be85d4ad | 2911 | if (!dev || (dev->flags & IFF_UP)) { |
ce06b03e | 2912 | register_prot_hook(sk); |
be85d4ad UT |
2913 | } else { |
2914 | sk->sk_err = ENETDOWN; | |
2915 | if (!sock_flag(sk, SOCK_DEAD)) | |
2916 | sk->sk_error_report(sk); | |
1da177e4 LT |
2917 | } |
2918 | ||
2919 | out_unlock: | |
2920 | spin_unlock(&po->bind_lock); | |
2921 | release_sock(sk); | |
2922 | return 0; | |
2923 | } | |
2924 | ||
2925 | /* | |
2926 | * Bind a packet socket to a device | |
2927 | */ | |
2928 | ||
40d4e3df ED |
2929 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
2930 | int addr_len) | |
1da177e4 | 2931 | { |
40d4e3df | 2932 | struct sock *sk = sock->sk; |
1da177e4 LT |
2933 | char name[15]; |
2934 | struct net_device *dev; | |
2935 | int err = -ENODEV; | |
1ce4f28b | 2936 | |
1da177e4 LT |
2937 | /* |
2938 | * Check legality | |
2939 | */ | |
1ce4f28b | 2940 | |
8ae55f04 | 2941 | if (addr_len != sizeof(struct sockaddr)) |
1da177e4 | 2942 | return -EINVAL; |
40d4e3df | 2943 | strlcpy(name, uaddr->sa_data, sizeof(name)); |
1da177e4 | 2944 | |
3b1e0a65 | 2945 | dev = dev_get_by_name(sock_net(sk), name); |
160ff18a | 2946 | if (dev) |
1da177e4 | 2947 | err = packet_do_bind(sk, dev, pkt_sk(sk)->num); |
1da177e4 LT |
2948 | return err; |
2949 | } | |
1da177e4 LT |
2950 | |
2951 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |
2952 | { | |
40d4e3df ED |
2953 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; |
2954 | struct sock *sk = sock->sk; | |
1da177e4 LT |
2955 | struct net_device *dev = NULL; |
2956 | int err; | |
2957 | ||
2958 | ||
2959 | /* | |
2960 | * Check legality | |
2961 | */ | |
1ce4f28b | 2962 | |
1da177e4 LT |
2963 | if (addr_len < sizeof(struct sockaddr_ll)) |
2964 | return -EINVAL; | |
2965 | if (sll->sll_family != AF_PACKET) | |
2966 | return -EINVAL; | |
2967 | ||
2968 | if (sll->sll_ifindex) { | |
2969 | err = -ENODEV; | |
3b1e0a65 | 2970 | dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex); |
1da177e4 LT |
2971 | if (dev == NULL) |
2972 | goto out; | |
2973 | } | |
2974 | err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num); | |
1da177e4 LT |
2975 | |
2976 | out: | |
2977 | return err; | |
2978 | } | |
2979 | ||
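/*
 * Illustrative sketch: packet_bind() keys on sll_ifindex (0 means "all
 * devices") and lets sll_protocol override the protocol chosen at
 * socket() time; packet_bind_spkt() above instead takes a device *name*
 * in sa_data.
 */
#if 0
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static int bind_to_dev(int fd, const char *ifname)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex  = if_nametoindex(ifname);

	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
#endif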
2980 | static struct proto packet_proto = { | |
2981 | .name = "PACKET", | |
2982 | .owner = THIS_MODULE, | |
2983 | .obj_size = sizeof(struct packet_sock), | |
2984 | }; | |
2985 | ||
2986 | /* | |
1ce4f28b | 2987 | * Create a packet socket (SOCK_RAW, SOCK_DGRAM, or legacy SOCK_PACKET). | |
1da177e4 LT |
2988 | */ |
2989 | ||
3f378b68 EP |
2990 | static int packet_create(struct net *net, struct socket *sock, int protocol, |
2991 | int kern) | |
1da177e4 LT |
2992 | { |
2993 | struct sock *sk; | |
2994 | struct packet_sock *po; | |
0e11c91e | 2995 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ |
1da177e4 LT |
2996 | int err; |
2997 | ||
df008c91 | 2998 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) |
1da177e4 | 2999 | return -EPERM; |
be02097c DM |
3000 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && |
3001 | sock->type != SOCK_PACKET) | |
1da177e4 LT |
3002 | return -ESOCKTNOSUPPORT; |
3003 | ||
3004 | sock->state = SS_UNCONNECTED; | |
3005 | ||
3006 | err = -ENOBUFS; | |
11aa9c28 | 3007 | sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); |
1da177e4 LT |
3008 | if (sk == NULL) |
3009 | goto out; | |
3010 | ||
3011 | sock->ops = &packet_ops; | |
1da177e4 LT |
3012 | if (sock->type == SOCK_PACKET) |
3013 | sock->ops = &packet_ops_spkt; | |
be02097c | 3014 | |
1da177e4 LT |
3015 | sock_init_data(sock, sk); |
3016 | ||
3017 | po = pkt_sk(sk); | |
3018 | sk->sk_family = PF_PACKET; | |
0e11c91e | 3019 | po->num = proto; |
d346a3fa | 3020 | po->xmit = dev_queue_xmit; |
66e56cd4 | 3021 | |
b0138408 DB |
3022 | err = packet_alloc_pending(po); |
3023 | if (err) | |
3024 | goto out2; | |
3025 | ||
66e56cd4 | 3026 | packet_cached_dev_reset(po); |
1da177e4 LT |
3027 | |
3028 | sk->sk_destruct = packet_sock_destruct; | |
17ab56a2 | 3029 | sk_refcnt_debug_inc(sk); |
1da177e4 LT |
3030 | |
3031 | /* | |
3032 | * Attach a protocol block | |
3033 | */ | |
3034 | ||
3035 | spin_lock_init(&po->bind_lock); | |
905db440 | 3036 | mutex_init(&po->pg_vec_lock); |
0648ab70 | 3037 | po->rollover = NULL; |
1da177e4 | 3038 | po->prot_hook.func = packet_rcv; |
be02097c | 3039 | |
1da177e4 LT |
3040 | if (sock->type == SOCK_PACKET) |
3041 | po->prot_hook.func = packet_rcv_spkt; | |
be02097c | 3042 | |
1da177e4 LT |
3043 | po->prot_hook.af_packet_priv = sk; |
3044 | ||
0e11c91e AV |
3045 | if (proto) { |
3046 | po->prot_hook.type = proto; | |
ce06b03e | 3047 | register_prot_hook(sk); |
1da177e4 LT |
3048 | } |
3049 | ||
0fa7fa98 | 3050 | mutex_lock(&net->packet.sklist_lock); |
808f5114 | 3051 | sk_add_node_rcu(sk, &net->packet.sklist); |
0fa7fa98 PE |
3052 | mutex_unlock(&net->packet.sklist_lock); |
3053 | ||
3054 | preempt_disable(); | |
3680453c | 3055 | sock_prot_inuse_add(net, &packet_proto, 1); |
0fa7fa98 | 3056 | preempt_enable(); |
808f5114 | 3057 | |
40d4e3df | 3058 | return 0; |
b0138408 DB |
3059 | out2: |
3060 | sk_free(sk); | |
1da177e4 LT |
3061 | out: |
3062 | return err; | |
3063 | } | |
3064 | ||
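/*
 * Illustrative sketch: packet_create() is reached through socket(2). The
 * protocol argument is a big-endian ethertype ("weird, but documented"
 * above); passing 0 leaves the socket silent until bind() supplies a
 * protocol.
 */
#if 0
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>

static void open_variants(void)
{
	int raw  = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));  /* ll header included */
	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP)); /* ll header stripped */
	int idle = socket(AF_PACKET, SOCK_RAW, 0);                 /* no traffic until bind() */
}
#endif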
3065 | /* | |
3066 | * Pull a packet from our receive queue and hand it to the user. | |
3067 | * If necessary we block. | |
3068 | */ | |
3069 | ||
1b784140 YX |
3070 | static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, |
3071 | int flags) | |
1da177e4 LT |
3072 | { |
3073 | struct sock *sk = sock->sk; | |
3074 | struct sk_buff *skb; | |
3075 | int copied, err; | |
bfd5f4a3 | 3076 | int vnet_hdr_len = 0; |
2472d761 | 3077 | unsigned int origlen = 0; |
1da177e4 LT |
3078 | |
3079 | err = -EINVAL; | |
ed85b565 | 3080 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) |
1da177e4 LT |
3081 | goto out; |
3082 | ||
3083 | #if 0 | |
3084 | /* What error should we return now? EUNATTACH? */ | |
3085 | if (pkt_sk(sk)->ifindex < 0) | |
3086 | return -ENODEV; | |
3087 | #endif | |
3088 | ||
ed85b565 | 3089 | if (flags & MSG_ERRQUEUE) { |
cb820f8e RC |
3090 | err = sock_recv_errqueue(sk, msg, len, |
3091 | SOL_PACKET, PACKET_TX_TIMESTAMP); | |
ed85b565 RC |
3092 | goto out; |
3093 | } | |
3094 | ||
1da177e4 LT |
3095 | /* |
3096 | * Call the generic datagram receiver. This handles all sorts | |
3097 | * of horrible races and re-entrancy so we can forget about it | |
3098 | * in the protocol layers. | |
3099 | * | |
3100 | * It will return ENETDOWN if the device has just gone down, |
3101 | * but then it will block. |
3102 | */ | |
3103 | ||
40d4e3df | 3104 | skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); |
1da177e4 LT |
3105 | |
3106 | /* | |
1ce4f28b | 3107 | * If an error occurred, return it. skb_recv_datagram() |
1da177e4 LT |
3108 | * handles the blocking for us, so we need not see or worry |
3109 | * about blocking retries. |
3110 | */ | |
3111 | ||
8ae55f04 | 3112 | if (skb == NULL) |
1da177e4 LT |
3113 | goto out; |
3114 | ||
2ccdbaa6 WB |
3115 | if (pkt_sk(sk)->pressure) |
3116 | packet_rcv_has_room(pkt_sk(sk), NULL); | |
3117 | ||
bfd5f4a3 SS |
3118 | if (pkt_sk(sk)->has_vnet_hdr) { |
3119 | struct virtio_net_hdr vnet_hdr = { 0 }; | |
3120 | ||
3121 | err = -EINVAL; | |
3122 | vnet_hdr_len = sizeof(vnet_hdr); | |
1f18b717 | 3123 | if (len < vnet_hdr_len) |
bfd5f4a3 SS |
3124 | goto out_free; |
3125 | ||
1f18b717 MK |
3126 | len -= vnet_hdr_len; |
3127 | ||
bfd5f4a3 SS |
3128 | if (skb_is_gso(skb)) { |
3129 | struct skb_shared_info *sinfo = skb_shinfo(skb); | |
3130 | ||
3131 | /* This is a hint as to how much should be linear. */ | |
dc9e5153 MT |
3132 | vnet_hdr.hdr_len = |
3133 | __cpu_to_virtio16(false, skb_headlen(skb)); | |
3134 | vnet_hdr.gso_size = | |
3135 | __cpu_to_virtio16(false, sinfo->gso_size); | |
bfd5f4a3 SS |
3136 | if (sinfo->gso_type & SKB_GSO_TCPV4) |
3137 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | |
3138 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | |
3139 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | |
3140 | else if (sinfo->gso_type & SKB_GSO_UDP) | |
3141 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; | |
3142 | else if (sinfo->gso_type & SKB_GSO_FCOE) | |
3143 | goto out_free; | |
3144 | else | |
3145 | BUG(); | |
3146 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) | |
3147 | vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; | |
3148 | } else | |
3149 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; | |
3150 | ||
3151 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
3152 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; | |
dc9e5153 MT |
3153 | vnet_hdr.csum_start = __cpu_to_virtio16(false, |
3154 | skb_checksum_start_offset(skb)); | |
3155 | vnet_hdr.csum_offset = __cpu_to_virtio16(false, | |
3156 | skb->csum_offset); | |
10a8d94a JW |
3157 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
3158 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; | |
bfd5f4a3 SS |
3159 | } /* else everything is zero */ |
3160 | ||
7eab8d9e | 3161 | err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len); |
bfd5f4a3 SS |
3162 | if (err < 0) |
3163 | goto out_free; | |
3164 | } | |
3165 | ||
f3d33426 HFS |
3166 | /* Any data beyond the buffer the caller supplied is lost. If that |
3167 | * worries a user program, it can always ask the device for its MTU |
3168 | * anyway. |
1da177e4 | 3169 | */ |
1da177e4 | 3170 | copied = skb->len; |
40d4e3df ED |
3171 | if (copied > len) { |
3172 | copied = len; | |
3173 | msg->msg_flags |= MSG_TRUNC; | |
1da177e4 LT |
3174 | } |
3175 | ||
51f3d02b | 3176 | err = skb_copy_datagram_msg(skb, 0, msg, copied); |
1da177e4 LT |
3177 | if (err) |
3178 | goto out_free; | |
3179 | ||
2472d761 EB |
3180 | if (sock->type != SOCK_PACKET) { |
3181 | struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; | |
3182 | ||
3183 | /* Original length was stored in sockaddr_ll fields */ | |
3184 | origlen = PACKET_SKB_CB(skb)->sa.origlen; | |
3185 | sll->sll_family = AF_PACKET; | |
3186 | sll->sll_protocol = skb->protocol; | |
3187 | } | |
3188 | ||
3b885787 | 3189 | sock_recv_ts_and_drops(msg, sk, skb); |
1da177e4 | 3190 | |
f3d33426 HFS |
3191 | if (msg->msg_name) { |
3192 | /* If the address length field is there to be filled | |
3193 | * in, we fill it in now. | |
3194 | */ | |
3195 | if (sock->type == SOCK_PACKET) { | |
342dfc30 | 3196 | __sockaddr_check_size(sizeof(struct sockaddr_pkt)); |
f3d33426 HFS |
3197 | msg->msg_namelen = sizeof(struct sockaddr_pkt); |
3198 | } else { | |
3199 | struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; | |
2472d761 | 3200 | |
f3d33426 HFS |
3201 | msg->msg_namelen = sll->sll_halen + |
3202 | offsetof(struct sockaddr_ll, sll_addr); | |
3203 | } | |
ffbc6111 HX |
3204 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, |
3205 | msg->msg_namelen); | |
f3d33426 | 3206 | } |
1da177e4 | 3207 | |
8dc41944 | 3208 | if (pkt_sk(sk)->auxdata) { |
ffbc6111 HX |
3209 | struct tpacket_auxdata aux; |
3210 | ||
3211 | aux.tp_status = TP_STATUS_USER; | |
3212 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
3213 | aux.tp_status |= TP_STATUS_CSUMNOTREADY; | |
682f048b AD |
3214 | else if (skb->pkt_type != PACKET_OUTGOING && |
3215 | (skb->ip_summed == CHECKSUM_COMPLETE || | |
3216 | skb_csum_unnecessary(skb))) | |
3217 | aux.tp_status |= TP_STATUS_CSUM_VALID; | |
3218 | ||
2472d761 | 3219 | aux.tp_len = origlen; |
ffbc6111 HX |
3220 | aux.tp_snaplen = skb->len; |
3221 | aux.tp_mac = 0; | |
bbe735e4 | 3222 | aux.tp_net = skb_network_offset(skb); |
df8a39de JP |
3223 | if (skb_vlan_tag_present(skb)) { |
3224 | aux.tp_vlan_tci = skb_vlan_tag_get(skb); | |
a0cdfcf3 AW |
3225 | aux.tp_vlan_tpid = ntohs(skb->vlan_proto); |
3226 | aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | |
a3bcc23e BG |
3227 | } else { |
3228 | aux.tp_vlan_tci = 0; | |
a0cdfcf3 | 3229 | aux.tp_vlan_tpid = 0; |
a3bcc23e | 3230 | } |
ffbc6111 | 3231 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); |
8dc41944 HX |
3232 | } |
3233 | ||
1da177e4 LT |
3234 | /* |
3235 | * Free or return the buffer as appropriate. Again this | |
3236 | * hides all the races and re-entrancy issues from us. | |
3237 | */ | |
bfd5f4a3 | 3238 | err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied); |
1da177e4 LT |
3239 | |
3240 | out_free: | |
3241 | skb_free_datagram(sk, skb); | |
3242 | out: | |
3243 | return err; | |
3244 | } | |
3245 | ||
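Seen from the other side of the syscall, the MSG_TRUNC accounting and the auxdata cmsg emitted above can be consumed roughly as follows. A sketch, assuming PACKET_AUXDATA was enabled via setsockopt() (see packet_setsockopt() below); buffer sizes are arbitrary:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>

static void rx_one(int fd)
{
	char frame[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct sockaddr_ll sll;
	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
	struct msghdr msg = {
		.msg_name = &sll,	.msg_namelen = sizeof(sll),
		.msg_iov = &iov,	.msg_iovlen = 1,
		.msg_control = cbuf,	.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	/* with MSG_TRUNC the return value is the full skb->len,
	 * even when fewer bytes were copied */
	ssize_t n = recvmsg(fd, &msg, MSG_TRUNC);

	if (n < 0)
		return;
	printf("ifindex=%d proto=0x%04x len=%zd\n",
	       sll.sll_ifindex, ntohs(sll.sll_protocol), n);

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);

			printf("snaplen=%u vlan_tci=%u\n",
			       aux->tp_snaplen, aux->tp_vlan_tci);
		}
	}
}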
1da177e4 LT |
3246 | static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, |
3247 | int *uaddr_len, int peer) | |
3248 | { | |
3249 | struct net_device *dev; | |
3250 | struct sock *sk = sock->sk; | |
3251 | ||
3252 | if (peer) | |
3253 | return -EOPNOTSUPP; | |
3254 | ||
3255 | uaddr->sa_family = AF_PACKET; | |
2dc85bf3 | 3256 | memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); |
654d1f8a ED |
3257 | rcu_read_lock(); |
3258 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); | |
3259 | if (dev) | |
2dc85bf3 | 3260 | strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); |
654d1f8a | 3261 | rcu_read_unlock(); |
1da177e4 LT |
3262 | *uaddr_len = sizeof(*uaddr); |
3263 | ||
3264 | return 0; | |
3265 | } | |
1da177e4 LT |
3266 | |
3267 | static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |
3268 | int *uaddr_len, int peer) | |
3269 | { | |
3270 | struct net_device *dev; | |
3271 | struct sock *sk = sock->sk; | |
3272 | struct packet_sock *po = pkt_sk(sk); | |
13cfa97b | 3273 | DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); |
1da177e4 LT |
3274 | |
3275 | if (peer) | |
3276 | return -EOPNOTSUPP; | |
3277 | ||
3278 | sll->sll_family = AF_PACKET; | |
3279 | sll->sll_ifindex = po->ifindex; | |
3280 | sll->sll_protocol = po->num; | |
67286640 | 3281 | sll->sll_pkttype = 0; |
654d1f8a ED |
3282 | rcu_read_lock(); |
3283 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); | |
1da177e4 LT |
3284 | if (dev) { |
3285 | sll->sll_hatype = dev->type; | |
3286 | sll->sll_halen = dev->addr_len; | |
3287 | memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); | |
1da177e4 LT |
3288 | } else { |
3289 | sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ | |
3290 | sll->sll_halen = 0; | |
3291 | } | |
654d1f8a | 3292 | rcu_read_unlock(); |
0fb375fb | 3293 | *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; |
1da177e4 LT |
3294 | |
3295 | return 0; | |
3296 | } | |
3297 | ||
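Both getname variants above back getsockname(2). A sketch for the sockaddr_ll case (illustrative only):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>

static void show_name(int fd)
{
	struct sockaddr_ll sll;
	socklen_t len = sizeof(sll);

	if (getsockname(fd, (struct sockaddr *)&sll, &len) == 0)
		/* len comes back as offsetof(sockaddr_ll, sll_addr) + halen */
		printf("ifindex=%d proto=0x%04x halen=%u\n",
		       sll.sll_ifindex, ntohs(sll.sll_protocol),
		       sll.sll_halen);
}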
2aeb0b88 WC |
3298 | static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, |
3299 | int what) | |
1da177e4 LT |
3300 | { |
3301 | switch (i->type) { | |
3302 | case PACKET_MR_MULTICAST: | |
1162563f JP |
3303 | if (i->alen != dev->addr_len) |
3304 | return -EINVAL; | |
1da177e4 | 3305 | if (what > 0) |
22bedad3 | 3306 | return dev_mc_add(dev, i->addr); |
1da177e4 | 3307 | else |
22bedad3 | 3308 | return dev_mc_del(dev, i->addr); |
1da177e4 LT |
3309 | break; |
3310 | case PACKET_MR_PROMISC: | |
2aeb0b88 | 3311 | return dev_set_promiscuity(dev, what); |
1da177e4 | 3312 | case PACKET_MR_ALLMULTI: |
2aeb0b88 | 3313 | return dev_set_allmulti(dev, what); |
d95ed927 | 3314 | case PACKET_MR_UNICAST: |
1162563f JP |
3315 | if (i->alen != dev->addr_len) |
3316 | return -EINVAL; | |
d95ed927 | 3317 | if (what > 0) |
a748ee24 | 3318 | return dev_uc_add(dev, i->addr); |
d95ed927 | 3319 | else |
a748ee24 | 3320 | return dev_uc_del(dev, i->addr); |
d95ed927 | 3321 | break; |
40d4e3df ED |
3322 | default: |
3323 | break; | |
1da177e4 | 3324 | } |
2aeb0b88 | 3325 | return 0; |
1da177e4 LT |
3326 | } |
3327 | ||
82f17091 FR |
3328 | static void packet_dev_mclist_delete(struct net_device *dev, |
3329 | struct packet_mclist **mlp) | |
1da177e4 | 3330 | { |
82f17091 FR |
3331 | struct packet_mclist *ml; |
3332 | ||
3333 | while ((ml = *mlp) != NULL) { | |
3334 | if (ml->ifindex == dev->ifindex) { | |
3335 | packet_dev_mc(dev, ml, -1); | |
3336 | *mlp = ml->next; | |
3337 | kfree(ml); | |
3338 | } else | |
3339 | mlp = &ml->next; | |
1da177e4 LT |
3340 | } |
3341 | } | |
3342 | ||
0fb375fb | 3343 | static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) |
1da177e4 LT |
3344 | { |
3345 | struct packet_sock *po = pkt_sk(sk); | |
3346 | struct packet_mclist *ml, *i; | |
3347 | struct net_device *dev; | |
3348 | int err; | |
3349 | ||
3350 | rtnl_lock(); | |
3351 | ||
3352 | err = -ENODEV; | |
3b1e0a65 | 3353 | dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); |
1da177e4 LT |
3354 | if (!dev) |
3355 | goto done; | |
3356 | ||
3357 | err = -EINVAL; | |
1162563f | 3358 | if (mreq->mr_alen > dev->addr_len) |
1da177e4 LT |
3359 | goto done; |
3360 | ||
3361 | err = -ENOBUFS; | |
8b3a7005 | 3362 | i = kmalloc(sizeof(*i), GFP_KERNEL); |
1da177e4 LT |
3363 | if (i == NULL) |
3364 | goto done; | |
3365 | ||
3366 | err = 0; | |
3367 | for (ml = po->mclist; ml; ml = ml->next) { | |
3368 | if (ml->ifindex == mreq->mr_ifindex && | |
3369 | ml->type == mreq->mr_type && | |
3370 | ml->alen == mreq->mr_alen && | |
3371 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | |
3372 | ml->count++; | |
3373 | /* Free the new element ... */ | |
3374 | kfree(i); | |
3375 | goto done; | |
3376 | } | |
3377 | } | |
3378 | ||
3379 | i->type = mreq->mr_type; | |
3380 | i->ifindex = mreq->mr_ifindex; | |
3381 | i->alen = mreq->mr_alen; | |
3382 | memcpy(i->addr, mreq->mr_address, i->alen); | |
3383 | i->count = 1; | |
3384 | i->next = po->mclist; | |
3385 | po->mclist = i; | |
2aeb0b88 WC |
3386 | err = packet_dev_mc(dev, i, 1); |
3387 | if (err) { | |
3388 | po->mclist = i->next; | |
3389 | kfree(i); | |
3390 | } | |
1da177e4 LT |
3391 | |
3392 | done: | |
3393 | rtnl_unlock(); | |
3394 | return err; | |
3395 | } | |
3396 | ||
0fb375fb | 3397 | static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) |
1da177e4 LT |
3398 | { |
3399 | struct packet_mclist *ml, **mlp; | |
3400 | ||
3401 | rtnl_lock(); | |
3402 | ||
3403 | for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { | |
3404 | if (ml->ifindex == mreq->mr_ifindex && | |
3405 | ml->type == mreq->mr_type && | |
3406 | ml->alen == mreq->mr_alen && | |
3407 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | |
3408 | if (--ml->count == 0) { | |
3409 | struct net_device *dev; | |
3410 | *mlp = ml->next; | |
ad959e76 ED |
3411 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
3412 | if (dev) | |
1da177e4 | 3413 | packet_dev_mc(dev, ml, -1); |
1da177e4 LT |
3414 | kfree(ml); |
3415 | } | |
82f17091 | 3416 | break; |
1da177e4 LT |
3417 | } |
3418 | } | |
3419 | rtnl_unlock(); | |
82f17091 | 3420 | return 0; |
1da177e4 LT |
3421 | } |
3422 | ||
3423 | static void packet_flush_mclist(struct sock *sk) | |
3424 | { | |
3425 | struct packet_sock *po = pkt_sk(sk); | |
3426 | struct packet_mclist *ml; | |
3427 | ||
3428 | if (!po->mclist) | |
3429 | return; | |
3430 | ||
3431 | rtnl_lock(); | |
3432 | while ((ml = po->mclist) != NULL) { | |
3433 | struct net_device *dev; | |
3434 | ||
3435 | po->mclist = ml->next; | |
ad959e76 ED |
3436 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
3437 | if (dev != NULL) | |
1da177e4 | 3438 | packet_dev_mc(dev, ml, -1); |
1da177e4 LT |
3439 | kfree(ml); |
3440 | } | |
3441 | rtnl_unlock(); | |
3442 | } | |
1da177e4 LT |
3443 | |
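The helpers above back PACKET_ADD_MEMBERSHIP and PACKET_DROP_MEMBERSHIP; note that packet_flush_mclist() drops any remaining references on socket close, so e.g. promiscuity cannot leak. A sketch of the PACKET_MR_PROMISC case (the interface name is a placeholder):

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <net/if.h>		/* if_nametoindex */

static int enable_promisc(int fd, const char *ifname)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = if_nametoindex(ifname);
	mreq.mr_type = PACKET_MR_PROMISC;	/* routed to dev_set_promiscuity() */
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}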
3444 | static int | |
b7058842 | 3445 | packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) |
1da177e4 LT |
3446 | { |
3447 | struct sock *sk = sock->sk; | |
8dc41944 | 3448 | struct packet_sock *po = pkt_sk(sk); |
1da177e4 LT |
3449 | int ret; |
3450 | ||
3451 | if (level != SOL_PACKET) | |
3452 | return -ENOPROTOOPT; | |
3453 | ||
69e3c75f | 3454 | switch (optname) { |
1ce4f28b | 3455 | case PACKET_ADD_MEMBERSHIP: |
1da177e4 LT |
3456 | case PACKET_DROP_MEMBERSHIP: |
3457 | { | |
0fb375fb EB |
3458 | struct packet_mreq_max mreq; |
3459 | int len = optlen; | |
3460 | memset(&mreq, 0, sizeof(mreq)); | |
3461 | if (len < sizeof(struct packet_mreq)) | |
1da177e4 | 3462 | return -EINVAL; |
0fb375fb EB |
3463 | if (len > sizeof(mreq)) |
3464 | len = sizeof(mreq); | |
40d4e3df | 3465 | if (copy_from_user(&mreq, optval, len)) |
1da177e4 | 3466 | return -EFAULT; |
0fb375fb EB |
3467 | if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) |
3468 | return -EINVAL; | |
1da177e4 LT |
3469 | if (optname == PACKET_ADD_MEMBERSHIP) |
3470 | ret = packet_mc_add(sk, &mreq); | |
3471 | else | |
3472 | ret = packet_mc_drop(sk, &mreq); | |
3473 | return ret; | |
3474 | } | |
a2efcfa0 | 3475 | |
1da177e4 | 3476 | case PACKET_RX_RING: |
69e3c75f | 3477 | case PACKET_TX_RING: |
1da177e4 | 3478 | { |
f6fb8f10 | 3479 | union tpacket_req_u req_u; |
3480 | int len; | |
1da177e4 | 3481 | |
f6fb8f10 | 3482 | switch (po->tp_version) { |
3483 | case TPACKET_V1: | |
3484 | case TPACKET_V2: | |
3485 | len = sizeof(req_u.req); | |
3486 | break; | |
3487 | case TPACKET_V3: | |
3488 | default: | |
3489 | len = sizeof(req_u.req3); | |
3490 | break; | |
3491 | } | |
3492 | if (optlen < len) | |
1da177e4 | 3493 | return -EINVAL; |
bfd5f4a3 SS |
3494 | if (pkt_sk(sk)->has_vnet_hdr) |
3495 | return -EINVAL; | |
f6fb8f10 | 3496 | if (copy_from_user(&req_u.req, optval, len)) |
1da177e4 | 3497 | return -EFAULT; |
f6fb8f10 | 3498 | return packet_set_ring(sk, &req_u, 0, |
3499 | optname == PACKET_TX_RING); | |
1da177e4 LT |
3500 | } |
3501 | case PACKET_COPY_THRESH: | |
3502 | { | |
3503 | int val; | |
3504 | ||
40d4e3df | 3505 | if (optlen != sizeof(val)) |
1da177e4 | 3506 | return -EINVAL; |
40d4e3df | 3507 | if (copy_from_user(&val, optval, sizeof(val))) |
1da177e4 LT |
3508 | return -EFAULT; |
3509 | ||
3510 | pkt_sk(sk)->copy_thresh = val; | |
3511 | return 0; | |
3512 | } | |
bbd6ef87 PM |
3513 | case PACKET_VERSION: |
3514 | { | |
3515 | int val; | |
3516 | ||
3517 | if (optlen != sizeof(val)) | |
3518 | return -EINVAL; | |
69e3c75f | 3519 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
bbd6ef87 PM |
3520 | return -EBUSY; |
3521 | if (copy_from_user(&val, optval, sizeof(val))) | |
3522 | return -EFAULT; | |
3523 | switch (val) { | |
3524 | case TPACKET_V1: | |
3525 | case TPACKET_V2: | |
f6fb8f10 | 3526 | case TPACKET_V3: |
bbd6ef87 PM |
3527 | po->tp_version = val; |
3528 | return 0; | |
3529 | default: | |
3530 | return -EINVAL; | |
3531 | } | |
3532 | } | |
8913336a PM |
3533 | case PACKET_RESERVE: |
3534 | { | |
3535 | unsigned int val; | |
3536 | ||
3537 | if (optlen != sizeof(val)) | |
3538 | return -EINVAL; | |
69e3c75f | 3539 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
8913336a PM |
3540 | return -EBUSY; |
3541 | if (copy_from_user(&val, optval, sizeof(val))) | |
3542 | return -EFAULT; | |
3543 | po->tp_reserve = val; | |
3544 | return 0; | |
3545 | } | |
69e3c75f JB |
3546 | case PACKET_LOSS: |
3547 | { | |
3548 | unsigned int val; | |
3549 | ||
3550 | if (optlen != sizeof(val)) | |
3551 | return -EINVAL; | |
3552 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3553 | return -EBUSY; | |
3554 | if (copy_from_user(&val, optval, sizeof(val))) | |
3555 | return -EFAULT; | |
3556 | po->tp_loss = !!val; | |
3557 | return 0; | |
3558 | } | |
8dc41944 HX |
3559 | case PACKET_AUXDATA: |
3560 | { | |
3561 | int val; | |
3562 | ||
3563 | if (optlen < sizeof(val)) | |
3564 | return -EINVAL; | |
3565 | if (copy_from_user(&val, optval, sizeof(val))) | |
3566 | return -EFAULT; | |
3567 | ||
3568 | po->auxdata = !!val; | |
3569 | return 0; | |
3570 | } | |
80feaacb PWJ |
3571 | case PACKET_ORIGDEV: |
3572 | { | |
3573 | int val; | |
3574 | ||
3575 | if (optlen < sizeof(val)) | |
3576 | return -EINVAL; | |
3577 | if (copy_from_user(&val, optval, sizeof(val))) | |
3578 | return -EFAULT; | |
3579 | ||
3580 | po->origdev = !!val; | |
3581 | return 0; | |
3582 | } | |
bfd5f4a3 SS |
3583 | case PACKET_VNET_HDR: |
3584 | { | |
3585 | int val; | |
3586 | ||
3587 | if (sock->type != SOCK_RAW) | |
3588 | return -EINVAL; | |
3589 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3590 | return -EBUSY; | |
3591 | if (optlen < sizeof(val)) | |
3592 | return -EINVAL; | |
3593 | if (copy_from_user(&val, optval, sizeof(val))) | |
3594 | return -EFAULT; | |
3595 | ||
3596 | po->has_vnet_hdr = !!val; | |
3597 | return 0; | |
3598 | } | |
614f60fa SM |
3599 | case PACKET_TIMESTAMP: |
3600 | { | |
3601 | int val; | |
3602 | ||
3603 | if (optlen != sizeof(val)) | |
3604 | return -EINVAL; | |
3605 | if (copy_from_user(&val, optval, sizeof(val))) | |
3606 | return -EFAULT; | |
3607 | ||
3608 | po->tp_tstamp = val; | |
3609 | return 0; | |
3610 | } | |
dc99f600 DM |
3611 | case PACKET_FANOUT: |
3612 | { | |
3613 | int val; | |
3614 | ||
3615 | if (optlen != sizeof(val)) | |
3616 | return -EINVAL; | |
3617 | if (copy_from_user(&val, optval, sizeof(val))) | |
3618 | return -EFAULT; | |
3619 | ||
3620 | return fanout_add(sk, val & 0xffff, val >> 16); | |
3621 | } | |
47dceb8e WB |
3622 | case PACKET_FANOUT_DATA: |
3623 | { | |
3624 | if (!po->fanout) | |
3625 | return -EINVAL; | |
3626 | ||
3627 | return fanout_set_data(po, optval, optlen); | |
3628 | } | |
5920cd3a PC |
3629 | case PACKET_TX_HAS_OFF: |
3630 | { | |
3631 | unsigned int val; | |
3632 | ||
3633 | if (optlen != sizeof(val)) | |
3634 | return -EINVAL; | |
3635 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3636 | return -EBUSY; | |
3637 | if (copy_from_user(&val, optval, sizeof(val))) | |
3638 | return -EFAULT; | |
3639 | po->tp_tx_has_off = !!val; | |
3640 | return 0; | |
3641 | } | |
d346a3fa DB |
3642 | case PACKET_QDISC_BYPASS: |
3643 | { | |
3644 | int val; | |
3645 | ||
3646 | if (optlen != sizeof(val)) | |
3647 | return -EINVAL; | |
3648 | if (copy_from_user(&val, optval, sizeof(val))) | |
3649 | return -EFAULT; | |
3650 | ||
3651 | po->xmit = val ? packet_direct_xmit : dev_queue_xmit; | |
3652 | return 0; | |
3653 | } | |
1da177e4 LT |
3654 | default: |
3655 | return -ENOPROTOOPT; | |
3656 | } | |
3657 | } | |
3658 | ||
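Most options above carry a plain int, but PACKET_FANOUT packs two fields into one, split by fanout_add(sk, val & 0xffff, val >> 16). A sketch of the userspace encoding (group id arbitrary):

#include <sys/socket.h>
#include <linux/if_packet.h>

static int join_fanout(int fd, unsigned short group_id)
{
	/* low 16 bits: group id; high 16 bits: fanout type and flags */
	int val = group_id | (PACKET_FANOUT_HASH << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
}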
3659 | static int packet_getsockopt(struct socket *sock, int level, int optname, | |
3660 | char __user *optval, int __user *optlen) | |
3661 | { | |
3662 | int len; | |
c06fff6e | 3663 | int val, lv = sizeof(val); |
1da177e4 LT |
3664 | struct sock *sk = sock->sk; |
3665 | struct packet_sock *po = pkt_sk(sk); | |
c06fff6e | 3666 | void *data = &val; |
ee80fbf3 | 3667 | union tpacket_stats_u st; |
a9b63918 | 3668 | struct tpacket_rollover_stats rstats; |
1da177e4 LT |
3669 | |
3670 | if (level != SOL_PACKET) | |
3671 | return -ENOPROTOOPT; | |
3672 | ||
8ae55f04 KK |
3673 | if (get_user(len, optlen)) |
3674 | return -EFAULT; | |
1da177e4 LT |
3675 | |
3676 | if (len < 0) | |
3677 | return -EINVAL; | |
1ce4f28b | 3678 | |
69e3c75f | 3679 | switch (optname) { |
1da177e4 | 3680 | case PACKET_STATISTICS: |
1da177e4 | 3681 | spin_lock_bh(&sk->sk_receive_queue.lock); |
ee80fbf3 DB |
3682 | memcpy(&st, &po->stats, sizeof(st)); |
3683 | memset(&po->stats, 0, sizeof(po->stats)); | |
3684 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
3685 | ||
f6fb8f10 | 3686 | if (po->tp_version == TPACKET_V3) { |
c06fff6e | 3687 | lv = sizeof(struct tpacket_stats_v3); |
8bcdeaff | 3688 | st.stats3.tp_packets += st.stats3.tp_drops; |
ee80fbf3 | 3689 | data = &st.stats3; |
f6fb8f10 | 3690 | } else { |
c06fff6e | 3691 | lv = sizeof(struct tpacket_stats); |
8bcdeaff | 3692 | st.stats1.tp_packets += st.stats1.tp_drops; |
ee80fbf3 | 3693 | data = &st.stats1; |
f6fb8f10 | 3694 | } |
ee80fbf3 | 3695 | |
8dc41944 HX |
3696 | break; |
3697 | case PACKET_AUXDATA: | |
8dc41944 | 3698 | val = po->auxdata; |
80feaacb PWJ |
3699 | break; |
3700 | case PACKET_ORIGDEV: | |
80feaacb | 3701 | val = po->origdev; |
bfd5f4a3 SS |
3702 | break; |
3703 | case PACKET_VNET_HDR: | |
bfd5f4a3 | 3704 | val = po->has_vnet_hdr; |
1da177e4 | 3705 | break; |
bbd6ef87 | 3706 | case PACKET_VERSION: |
bbd6ef87 | 3707 | val = po->tp_version; |
bbd6ef87 PM |
3708 | break; |
3709 | case PACKET_HDRLEN: | |
3710 | if (len > sizeof(int)) | |
3711 | len = sizeof(int); | |
3712 | if (copy_from_user(&val, optval, len)) | |
3713 | return -EFAULT; | |
3714 | switch (val) { | |
3715 | case TPACKET_V1: | |
3716 | val = sizeof(struct tpacket_hdr); | |
3717 | break; | |
3718 | case TPACKET_V2: | |
3719 | val = sizeof(struct tpacket2_hdr); | |
3720 | break; | |
f6fb8f10 | 3721 | case TPACKET_V3: |
3722 | val = sizeof(struct tpacket3_hdr); | |
3723 | break; | |
bbd6ef87 PM |
3724 | default: |
3725 | return -EINVAL; | |
3726 | } | |
bbd6ef87 | 3727 | break; |
8913336a | 3728 | case PACKET_RESERVE: |
8913336a | 3729 | val = po->tp_reserve; |
8913336a | 3730 | break; |
69e3c75f | 3731 | case PACKET_LOSS: |
69e3c75f | 3732 | val = po->tp_loss; |
69e3c75f | 3733 | break; |
614f60fa | 3734 | case PACKET_TIMESTAMP: |
614f60fa | 3735 | val = po->tp_tstamp; |
614f60fa | 3736 | break; |
dc99f600 | 3737 | case PACKET_FANOUT: |
dc99f600 DM |
3738 | val = (po->fanout ? |
3739 | ((u32)po->fanout->id | | |
77f65ebd WB |
3740 | ((u32)po->fanout->type << 16) | |
3741 | ((u32)po->fanout->flags << 24)) : | |
dc99f600 | 3742 | 0); |
dc99f600 | 3743 | break; |
a9b63918 WB |
3744 | case PACKET_ROLLOVER_STATS: |
3745 | if (!po->rollover) | |
3746 | return -EINVAL; | |
3747 | rstats.tp_all = atomic_long_read(&po->rollover->num); | |
3748 | rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); | |
3749 | rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); | |
3750 | data = &rstats; | |
3751 | lv = sizeof(rstats); | |
3752 | break; | |
5920cd3a PC |
3753 | case PACKET_TX_HAS_OFF: |
3754 | val = po->tp_tx_has_off; | |
3755 | break; | |
d346a3fa DB |
3756 | case PACKET_QDISC_BYPASS: |
3757 | val = packet_use_direct_xmit(po); | |
3758 | break; | |
1da177e4 LT |
3759 | default: |
3760 | return -ENOPROTOOPT; | |
3761 | } | |
3762 | ||
c06fff6e ED |
3763 | if (len > lv) |
3764 | len = lv; | |
8ae55f04 KK |
3765 | if (put_user(len, optlen)) |
3766 | return -EFAULT; | |
8dc41944 HX |
3767 | if (copy_to_user(optval, data, len)) |
3768 | return -EFAULT; | |
8ae55f04 | 3769 | return 0; |
1da177e4 LT |
3770 | } |
3771 | ||
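Note that reading PACKET_STATISTICS also zeroes the counters, and that tp_packets includes tp_drops after the adjustment above. A sketch of the TPACKET_V1/V2 read side (use struct tpacket_stats_v3 under TPACKET_V3):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void show_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	/* this read resets the kernel-side counters to zero */
	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
}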
3772 | ||
351638e7 JP |
3773 | static int packet_notifier(struct notifier_block *this, |
3774 | unsigned long msg, void *ptr) | |
1da177e4 LT |
3775 | { |
3776 | struct sock *sk; | |
351638e7 | 3777 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
c346dca1 | 3778 | struct net *net = dev_net(dev); |
1da177e4 | 3779 | |
808f5114 | 3780 | rcu_read_lock(); |
b67bfe0d | 3781 | sk_for_each_rcu(sk, &net->packet.sklist) { |
1da177e4 LT |
3782 | struct packet_sock *po = pkt_sk(sk); |
3783 | ||
3784 | switch (msg) { | |
3785 | case NETDEV_UNREGISTER: | |
1da177e4 | 3786 | if (po->mclist) |
82f17091 | 3787 | packet_dev_mclist_delete(dev, &po->mclist); |
a2efcfa0 DM |
3788 | /* fallthrough */ |
3789 | ||
1da177e4 LT |
3790 | case NETDEV_DOWN: |
3791 | if (dev->ifindex == po->ifindex) { | |
3792 | spin_lock(&po->bind_lock); | |
3793 | if (po->running) { | |
ce06b03e | 3794 | __unregister_prot_hook(sk, false); |
1da177e4 LT |
3795 | sk->sk_err = ENETDOWN; |
3796 | if (!sock_flag(sk, SOCK_DEAD)) | |
3797 | sk->sk_error_report(sk); | |
3798 | } | |
3799 | if (msg == NETDEV_UNREGISTER) { | |
66e56cd4 | 3800 | packet_cached_dev_reset(po); |
1da177e4 | 3801 | po->ifindex = -1; |
160ff18a BG |
3802 | if (po->prot_hook.dev) |
3803 | dev_put(po->prot_hook.dev); | |
1da177e4 LT |
3804 | po->prot_hook.dev = NULL; |
3805 | } | |
3806 | spin_unlock(&po->bind_lock); | |
3807 | } | |
3808 | break; | |
3809 | case NETDEV_UP: | |
808f5114 | 3810 | if (dev->ifindex == po->ifindex) { |
3811 | spin_lock(&po->bind_lock); | |
ce06b03e DM |
3812 | if (po->num) |
3813 | register_prot_hook(sk); | |
808f5114 | 3814 | spin_unlock(&po->bind_lock); |
1da177e4 | 3815 | } |
1da177e4 LT |
3816 | break; |
3817 | } | |
3818 | } | |
808f5114 | 3819 | rcu_read_unlock(); |
1da177e4 LT |
3820 | return NOTIFY_DONE; |
3821 | } | |
3822 | ||
3823 | ||
3824 | static int packet_ioctl(struct socket *sock, unsigned int cmd, | |
3825 | unsigned long arg) | |
3826 | { | |
3827 | struct sock *sk = sock->sk; | |
3828 | ||
69e3c75f | 3829 | switch (cmd) { |
40d4e3df ED |
3830 | case SIOCOUTQ: |
3831 | { | |
3832 | int amount = sk_wmem_alloc_get(sk); | |
31e6d363 | 3833 | |
40d4e3df ED |
3834 | return put_user(amount, (int __user *)arg); |
3835 | } | |
3836 | case SIOCINQ: | |
3837 | { | |
3838 | struct sk_buff *skb; | |
3839 | int amount = 0; | |
3840 | ||
3841 | spin_lock_bh(&sk->sk_receive_queue.lock); | |
3842 | skb = skb_peek(&sk->sk_receive_queue); | |
3843 | if (skb) | |
3844 | amount = skb->len; | |
3845 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
3846 | return put_user(amount, (int __user *)arg); | |
3847 | } | |
3848 | case SIOCGSTAMP: | |
3849 | return sock_get_timestamp(sk, (struct timeval __user *)arg); | |
3850 | case SIOCGSTAMPNS: | |
3851 | return sock_get_timestampns(sk, (struct timespec __user *)arg); | |
1ce4f28b | 3852 | |
1da177e4 | 3853 | #ifdef CONFIG_INET |
40d4e3df ED |
3854 | case SIOCADDRT: |
3855 | case SIOCDELRT: | |
3856 | case SIOCDARP: | |
3857 | case SIOCGARP: | |
3858 | case SIOCSARP: | |
3859 | case SIOCGIFADDR: | |
3860 | case SIOCSIFADDR: | |
3861 | case SIOCGIFBRDADDR: | |
3862 | case SIOCSIFBRDADDR: | |
3863 | case SIOCGIFNETMASK: | |
3864 | case SIOCSIFNETMASK: | |
3865 | case SIOCGIFDSTADDR: | |
3866 | case SIOCSIFDSTADDR: | |
3867 | case SIOCSIFFLAGS: | |
40d4e3df | 3868 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
1da177e4 LT |
3869 | #endif |
3870 | ||
40d4e3df ED |
3871 | default: |
3872 | return -ENOIOCTLCMD; | |
1da177e4 LT |
3873 | } |
3874 | return 0; | |
3875 | } | |
3876 | ||
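For packet sockets SIOCINQ reports the length of the next queued frame, not the total receive backlog, per the skb_peek() above. A sketch (illustrative):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

static void show_queues(int fd)
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0 &&
	    ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("next rx frame: %d bytes, unsent tx: %d bytes\n",
		       inq, outq);
}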
40d4e3df | 3877 | static unsigned int packet_poll(struct file *file, struct socket *sock, |
1da177e4 LT |
3878 | poll_table *wait) |
3879 | { | |
3880 | struct sock *sk = sock->sk; | |
3881 | struct packet_sock *po = pkt_sk(sk); | |
3882 | unsigned int mask = datagram_poll(file, sock, wait); | |
3883 | ||
3884 | spin_lock_bh(&sk->sk_receive_queue.lock); | |
69e3c75f | 3885 | if (po->rx_ring.pg_vec) { |
f6fb8f10 | 3886 | if (!packet_previous_rx_frame(po, &po->rx_ring, |
3887 | TP_STATUS_KERNEL)) | |
1da177e4 LT |
3888 | mask |= POLLIN | POLLRDNORM; |
3889 | } | |
2ccdbaa6 | 3890 | if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) |
54d7c01d | 3891 | po->pressure = 0; |
1da177e4 | 3892 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
69e3c75f JB |
3893 | spin_lock_bh(&sk->sk_write_queue.lock); |
3894 | if (po->tx_ring.pg_vec) { | |
3895 | if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) | |
3896 | mask |= POLLOUT | POLLWRNORM; | |
3897 | } | |
3898 | spin_unlock_bh(&sk->sk_write_queue.lock); | |
1da177e4 LT |
3899 | return mask; |
3900 | } | |
3901 | ||
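For ring users this poll implementation means a consumer can sleep until a frame is flagged TP_STATUS_USER instead of spinning on the status word. A TPACKET_V2 sketch (hdr is assumed to point at the next frame inside the ring mapped by packet_mmap() below):

#include <poll.h>
#include <linux/if_packet.h>

static int wait_rx_frame(int fd, volatile struct tpacket2_hdr *hdr)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };

	while (!(hdr->tp_status & TP_STATUS_USER))
		if (poll(&pfd, 1, -1) < 0)
			return -1;
	/* consume the frame, then hand it back with:
	 * hdr->tp_status = TP_STATUS_KERNEL; */
	return 0;
}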
3902 | ||
3903 | /* Dirty? Well, I still have not found a better way to account |
3904 | * for user mmaps. | |
3905 | */ | |
3906 | ||
3907 | static void packet_mm_open(struct vm_area_struct *vma) | |
3908 | { | |
3909 | struct file *file = vma->vm_file; | |
40d4e3df | 3910 | struct socket *sock = file->private_data; |
1da177e4 | 3911 | struct sock *sk = sock->sk; |
1ce4f28b | 3912 | |
1da177e4 LT |
3913 | if (sk) |
3914 | atomic_inc(&pkt_sk(sk)->mapped); | |
3915 | } | |
3916 | ||
3917 | static void packet_mm_close(struct vm_area_struct *vma) | |
3918 | { | |
3919 | struct file *file = vma->vm_file; | |
40d4e3df | 3920 | struct socket *sock = file->private_data; |
1da177e4 | 3921 | struct sock *sk = sock->sk; |
1ce4f28b | 3922 | |
1da177e4 LT |
3923 | if (sk) |
3924 | atomic_dec(&pkt_sk(sk)->mapped); | |
3925 | } | |
3926 | ||
f0f37e2f | 3927 | static const struct vm_operations_struct packet_mmap_ops = { |
40d4e3df ED |
3928 | .open = packet_mm_open, |
3929 | .close = packet_mm_close, | |
1da177e4 LT |
3930 | }; |
3931 | ||
0e3125c7 NH |
3932 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
3933 | unsigned int len) | |
1da177e4 LT |
3934 | { |
3935 | int i; | |
3936 | ||
4ebf0ae2 | 3937 | for (i = 0; i < len; i++) { |
0e3125c7 | 3938 | if (likely(pg_vec[i].buffer)) { |
c56b4d90 | 3939 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
0e3125c7 NH |
3940 | vfree(pg_vec[i].buffer); |
3941 | else | |
3942 | free_pages((unsigned long)pg_vec[i].buffer, | |
3943 | order); | |
3944 | pg_vec[i].buffer = NULL; | |
3945 | } | |
1da177e4 LT |
3946 | } |
3947 | kfree(pg_vec); | |
3948 | } | |
3949 | ||
eea49cc9 | 3950 | static char *alloc_one_pg_vec_page(unsigned long order) |
4ebf0ae2 | 3951 | { |
f0d4eb29 | 3952 | char *buffer; |
0e3125c7 NH |
3953 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | |
3954 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; | |
3955 | ||
3956 | buffer = (char *) __get_free_pages(gfp_flags, order); | |
0e3125c7 NH |
3957 | if (buffer) |
3958 | return buffer; | |
3959 | ||
f0d4eb29 | 3960 | /* __get_free_pages failed, fall back to vmalloc */ |
bbce5a59 | 3961 | buffer = vzalloc((1 << order) * PAGE_SIZE); |
0e3125c7 NH |
3962 | if (buffer) |
3963 | return buffer; | |
3964 | ||
f0d4eb29 | 3965 | /* vmalloc failed, let's dig into swap here */ |
0e3125c7 | 3966 | gfp_flags &= ~__GFP_NORETRY; |
f0d4eb29 | 3967 | buffer = (char *) __get_free_pages(gfp_flags, order); |
0e3125c7 NH |
3968 | if (buffer) |
3969 | return buffer; | |
3970 | ||
f0d4eb29 | 3971 | /* complete and utter failure */ |
0e3125c7 | 3972 | return NULL; |
4ebf0ae2 DM |
3973 | } |
3974 | ||
0e3125c7 | 3975 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
4ebf0ae2 DM |
3976 | { |
3977 | unsigned int block_nr = req->tp_block_nr; | |
0e3125c7 | 3978 | struct pgv *pg_vec; |
4ebf0ae2 DM |
3979 | int i; |
3980 | ||
0e3125c7 | 3981 | pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); |
4ebf0ae2 DM |
3982 | if (unlikely(!pg_vec)) |
3983 | goto out; | |
3984 | ||
3985 | for (i = 0; i < block_nr; i++) { | |
c56b4d90 | 3986 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
0e3125c7 | 3987 | if (unlikely(!pg_vec[i].buffer)) |
4ebf0ae2 DM |
3988 | goto out_free_pgvec; |
3989 | } | |
3990 | ||
3991 | out: | |
3992 | return pg_vec; | |
3993 | ||
3994 | out_free_pgvec: | |
3995 | free_pg_vec(pg_vec, order, block_nr); | |
3996 | pg_vec = NULL; | |
3997 | goto out; | |
3998 | } | |
1da177e4 | 3999 | |
f6fb8f10 | 4000 | static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
69e3c75f | 4001 | int closing, int tx_ring) |
1da177e4 | 4002 | { |
0e3125c7 | 4003 | struct pgv *pg_vec = NULL; |
1da177e4 | 4004 | struct packet_sock *po = pkt_sk(sk); |
0e11c91e | 4005 | int was_running, order = 0; |
69e3c75f JB |
4006 | struct packet_ring_buffer *rb; |
4007 | struct sk_buff_head *rb_queue; | |
0e11c91e | 4008 | __be16 num; |
f6fb8f10 | 4009 | int err = -EINVAL; |
4010 | /* Alias the V1/V2 request to keep code churn minimal */ |
4011 | struct tpacket_req *req = &req_u->req; | |
4012 | ||
4013 | /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ | |
4014 | if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { | |
4015 | WARN(1, "Tx-ring is not supported.\n"); | |
4016 | goto out; | |
4017 | } | |
1ce4f28b | 4018 | |
69e3c75f JB |
4019 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; |
4020 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; | |
1da177e4 | 4021 | |
69e3c75f JB |
4022 | err = -EBUSY; |
4023 | if (!closing) { | |
4024 | if (atomic_read(&po->mapped)) | |
4025 | goto out; | |
b0138408 | 4026 | if (packet_read_pending(rb)) |
69e3c75f JB |
4027 | goto out; |
4028 | } | |
1da177e4 | 4029 | |
69e3c75f JB |
4030 | if (req->tp_block_nr) { |
4031 | /* Sanity tests and some calculations */ | |
4032 | err = -EBUSY; | |
4033 | if (unlikely(rb->pg_vec)) | |
4034 | goto out; | |
1da177e4 | 4035 | |
bbd6ef87 PM |
4036 | switch (po->tp_version) { |
4037 | case TPACKET_V1: | |
4038 | po->tp_hdrlen = TPACKET_HDRLEN; | |
4039 | break; | |
4040 | case TPACKET_V2: | |
4041 | po->tp_hdrlen = TPACKET2_HDRLEN; | |
4042 | break; | |
f6fb8f10 | 4043 | case TPACKET_V3: |
4044 | po->tp_hdrlen = TPACKET3_HDRLEN; | |
4045 | break; | |
bbd6ef87 PM |
4046 | } |
4047 | ||
69e3c75f | 4048 | err = -EINVAL; |
4ebf0ae2 | 4049 | if (unlikely((int)req->tp_block_size <= 0)) |
69e3c75f | 4050 | goto out; |
4ebf0ae2 | 4051 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
69e3c75f | 4052 | goto out; |
dc808110 ED |
4053 | if (po->tp_version >= TPACKET_V3 && |
4054 | (int)(req->tp_block_size - | |
4055 | BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) | |
4056 | goto out; | |
8913336a | 4057 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
69e3c75f JB |
4058 | po->tp_reserve)) |
4059 | goto out; | |
4ebf0ae2 | 4060 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) |
69e3c75f | 4061 | goto out; |
1da177e4 | 4062 | |
69e3c75f JB |
4063 | rb->frames_per_block = req->tp_block_size/req->tp_frame_size; |
4064 | if (unlikely(rb->frames_per_block <= 0)) | |
4065 | goto out; | |
4066 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != | |
4067 | req->tp_frame_nr)) | |
4068 | goto out; | |
1da177e4 LT |
4069 | |
4070 | err = -ENOMEM; | |
4ebf0ae2 DM |
4071 | order = get_order(req->tp_block_size); |
4072 | pg_vec = alloc_pg_vec(req, order); | |
4073 | if (unlikely(!pg_vec)) | |
1da177e4 | 4074 | goto out; |
f6fb8f10 | 4075 | switch (po->tp_version) { |
4076 | case TPACKET_V3: | |
4077 | /* The transmit path is not supported. We checked |
4078 | * this above, but stay paranoid. |
4079 | */ | |
4080 | if (!tx_ring) | |
e8e85cc5 | 4081 | init_prb_bdqc(po, rb, pg_vec, req_u); |
d7cf0c34 | 4082 | break; |
f6fb8f10 | 4083 | default: |
4084 | break; | |
4085 | } | |
69e3c75f JB |
4086 | } |
4087 | /* Done */ | |
4088 | else { | |
4089 | err = -EINVAL; | |
4ebf0ae2 | 4090 | if (unlikely(req->tp_frame_nr)) |
69e3c75f | 4091 | goto out; |
1da177e4 LT |
4092 | } |
4093 | ||
4094 | lock_sock(sk); | |
4095 | ||
4096 | /* Detach socket from network */ | |
4097 | spin_lock(&po->bind_lock); | |
4098 | was_running = po->running; | |
4099 | num = po->num; | |
4100 | if (was_running) { | |
1da177e4 | 4101 | po->num = 0; |
ce06b03e | 4102 | __unregister_prot_hook(sk, false); |
1da177e4 LT |
4103 | } |
4104 | spin_unlock(&po->bind_lock); | |
1ce4f28b | 4105 | |
1da177e4 LT |
4106 | synchronize_net(); |
4107 | ||
4108 | err = -EBUSY; | |
905db440 | 4109 | mutex_lock(&po->pg_vec_lock); |
1da177e4 LT |
4110 | if (closing || atomic_read(&po->mapped) == 0) { |
4111 | err = 0; | |
69e3c75f | 4112 | spin_lock_bh(&rb_queue->lock); |
c053fd96 | 4113 | swap(rb->pg_vec, pg_vec); |
69e3c75f JB |
4114 | rb->frame_max = (req->tp_frame_nr - 1); |
4115 | rb->head = 0; | |
4116 | rb->frame_size = req->tp_frame_size; | |
4117 | spin_unlock_bh(&rb_queue->lock); | |
4118 | ||
c053fd96 CG |
4119 | swap(rb->pg_vec_order, order); |
4120 | swap(rb->pg_vec_len, req->tp_block_nr); | |
69e3c75f JB |
4121 | |
4122 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; | |
4123 | po->prot_hook.func = (po->rx_ring.pg_vec) ? | |
4124 | tpacket_rcv : packet_rcv; | |
4125 | skb_queue_purge(rb_queue); | |
1da177e4 | 4126 | if (atomic_read(&po->mapped)) |
40d4e3df ED |
4127 | pr_err("packet_mmap: vma is busy: %d\n", |
4128 | atomic_read(&po->mapped)); | |
1da177e4 | 4129 | } |
905db440 | 4130 | mutex_unlock(&po->pg_vec_lock); |
1da177e4 LT |
4131 | |
4132 | spin_lock(&po->bind_lock); | |
ce06b03e | 4133 | if (was_running) { |
1da177e4 | 4134 | po->num = num; |
ce06b03e | 4135 | register_prot_hook(sk); |
1da177e4 LT |
4136 | } |
4137 | spin_unlock(&po->bind_lock); | |
f6fb8f10 | 4138 | if (closing && (po->tp_version > TPACKET_V2)) { |
4139 | /* Block-based V3 is not supported on the tx ring */ |
4140 | if (!tx_ring) | |
73d0fcf2 | 4141 | prb_shutdown_retire_blk_timer(po, rb_queue); |
f6fb8f10 | 4142 | } |
1da177e4 LT |
4143 | release_sock(sk); |
4144 | ||
1da177e4 LT |
4145 | if (pg_vec) |
4146 | free_pg_vec(pg_vec, order, req->tp_block_nr); | |
4147 | out: | |
4148 | return err; | |
4149 | } | |
4150 | ||
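The sanity checks above translate into concrete constraints on the request: tp_block_size a multiple of PAGE_SIZE, tp_frame_size a multiple of TPACKET_ALIGNMENT and large enough for the header plus reserve, and tp_frame_nr equal to frames-per-block times tp_block_nr. A sketch that satisfies them for TPACKET_V2 (sizes assume 4 KiB pages and are otherwise arbitrary):

#include <sys/socket.h>
#include <linux/if_packet.h>

static int setup_rx_ring(int fd)
{
	int ver = TPACKET_V2;
	struct tpacket_req req = {
		.tp_block_size = 1 << 16,	/* multiple of PAGE_SIZE */
		.tp_block_nr   = 64,
		.tp_frame_size = 1 << 11,	/* multiple of TPACKET_ALIGNMENT */
		.tp_frame_nr   = ((1 << 16) / (1 << 11)) * 64,
	};

	/* version must be set first: PACKET_VERSION returns -EBUSY
	 * once a ring exists */
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) < 0)
		return -1;
	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
}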
69e3c75f JB |
4151 | static int packet_mmap(struct file *file, struct socket *sock, |
4152 | struct vm_area_struct *vma) | |
1da177e4 LT |
4153 | { |
4154 | struct sock *sk = sock->sk; | |
4155 | struct packet_sock *po = pkt_sk(sk); | |
69e3c75f JB |
4156 | unsigned long size, expected_size; |
4157 | struct packet_ring_buffer *rb; | |
1da177e4 LT |
4158 | unsigned long start; |
4159 | int err = -EINVAL; | |
4160 | int i; | |
4161 | ||
4162 | if (vma->vm_pgoff) | |
4163 | return -EINVAL; | |
4164 | ||
905db440 | 4165 | mutex_lock(&po->pg_vec_lock); |
69e3c75f JB |
4166 | |
4167 | expected_size = 0; | |
4168 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { | |
4169 | if (rb->pg_vec) { | |
4170 | expected_size += rb->pg_vec_len | |
4171 | * rb->pg_vec_pages | |
4172 | * PAGE_SIZE; | |
4173 | } | |
4174 | } | |
4175 | ||
4176 | if (expected_size == 0) | |
1da177e4 | 4177 | goto out; |
69e3c75f JB |
4178 | |
4179 | size = vma->vm_end - vma->vm_start; | |
4180 | if (size != expected_size) | |
1da177e4 LT |
4181 | goto out; |
4182 | ||
1da177e4 | 4183 | start = vma->vm_start; |
69e3c75f JB |
4184 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { |
4185 | if (rb->pg_vec == NULL) | |
4186 | continue; | |
4187 | ||
4188 | for (i = 0; i < rb->pg_vec_len; i++) { | |
0e3125c7 NH |
4189 | struct page *page; |
4190 | void *kaddr = rb->pg_vec[i].buffer; | |
69e3c75f JB |
4191 | int pg_num; |
4192 | ||
c56b4d90 CG |
4193 | for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { |
4194 | page = pgv_to_page(kaddr); | |
69e3c75f JB |
4195 | err = vm_insert_page(vma, start, page); |
4196 | if (unlikely(err)) | |
4197 | goto out; | |
4198 | start += PAGE_SIZE; | |
0e3125c7 | 4199 | kaddr += PAGE_SIZE; |
69e3c75f | 4200 | } |
4ebf0ae2 | 4201 | } |
1da177e4 | 4202 | } |
69e3c75f | 4203 | |
4ebf0ae2 | 4204 | atomic_inc(&po->mapped); |
1da177e4 LT |
4205 | vma->vm_ops = &packet_mmap_ops; |
4206 | err = 0; | |
4207 | ||
4208 | out: | |
905db440 | 4209 | mutex_unlock(&po->pg_vec_lock); |
1da177e4 LT |
4210 | return err; |
4211 | } | |
1da177e4 | 4212 | |
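Userspace maps whatever rings were configured in a single contiguous call; the length must equal the sum of the configured ring sizes and the page offset must be zero, exactly as checked here. A sketch (ring_bytes is assumed to be tp_block_size * tp_block_nr, summed over rx and tx rings):

#include <stddef.h>
#include <sys/mman.h>

static void *map_rings(int fd, size_t ring_bytes)
{
	void *ring = mmap(NULL, ring_bytes, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);

	return ring == MAP_FAILED ? NULL : ring;
}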
90ddc4f0 | 4213 | static const struct proto_ops packet_ops_spkt = { |
1da177e4 LT |
4214 | .family = PF_PACKET, |
4215 | .owner = THIS_MODULE, | |
4216 | .release = packet_release, | |
4217 | .bind = packet_bind_spkt, | |
4218 | .connect = sock_no_connect, | |
4219 | .socketpair = sock_no_socketpair, | |
4220 | .accept = sock_no_accept, | |
4221 | .getname = packet_getname_spkt, | |
4222 | .poll = datagram_poll, | |
4223 | .ioctl = packet_ioctl, | |
4224 | .listen = sock_no_listen, | |
4225 | .shutdown = sock_no_shutdown, | |
4226 | .setsockopt = sock_no_setsockopt, | |
4227 | .getsockopt = sock_no_getsockopt, | |
4228 | .sendmsg = packet_sendmsg_spkt, | |
4229 | .recvmsg = packet_recvmsg, | |
4230 | .mmap = sock_no_mmap, | |
4231 | .sendpage = sock_no_sendpage, | |
4232 | }; | |
1da177e4 | 4233 | |
90ddc4f0 | 4234 | static const struct proto_ops packet_ops = { |
1da177e4 LT |
4235 | .family = PF_PACKET, |
4236 | .owner = THIS_MODULE, | |
4237 | .release = packet_release, | |
4238 | .bind = packet_bind, | |
4239 | .connect = sock_no_connect, | |
4240 | .socketpair = sock_no_socketpair, | |
4241 | .accept = sock_no_accept, | |
1ce4f28b | 4242 | .getname = packet_getname, |
1da177e4 LT |
4243 | .poll = packet_poll, |
4244 | .ioctl = packet_ioctl, | |
4245 | .listen = sock_no_listen, | |
4246 | .shutdown = sock_no_shutdown, | |
4247 | .setsockopt = packet_setsockopt, | |
4248 | .getsockopt = packet_getsockopt, | |
4249 | .sendmsg = packet_sendmsg, | |
4250 | .recvmsg = packet_recvmsg, | |
4251 | .mmap = packet_mmap, | |
4252 | .sendpage = sock_no_sendpage, | |
4253 | }; | |
4254 | ||
ec1b4cf7 | 4255 | static const struct net_proto_family packet_family_ops = { |
1da177e4 LT |
4256 | .family = PF_PACKET, |
4257 | .create = packet_create, | |
4258 | .owner = THIS_MODULE, | |
4259 | }; | |
4260 | ||
4261 | static struct notifier_block packet_netdev_notifier = { | |
40d4e3df | 4262 | .notifier_call = packet_notifier, |
1da177e4 LT |
4263 | }; |
4264 | ||
4265 | #ifdef CONFIG_PROC_FS | |
1da177e4 LT |
4266 | |
4267 | static void *packet_seq_start(struct seq_file *seq, loff_t *pos) | |
808f5114 | 4268 | __acquires(RCU) |
1da177e4 | 4269 | { |
e372c414 | 4270 | struct net *net = seq_file_net(seq); |
808f5114 | 4271 | |
4272 | rcu_read_lock(); | |
4273 | return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); | |
1da177e4 LT |
4274 | } |
4275 | ||
4276 | static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
4277 | { | |
1bf40954 | 4278 | struct net *net = seq_file_net(seq); |
808f5114 | 4279 | return seq_hlist_next_rcu(v, &net->packet.sklist, pos); |
1da177e4 LT |
4280 | } |
4281 | ||
4282 | static void packet_seq_stop(struct seq_file *seq, void *v) | |
808f5114 | 4283 | __releases(RCU) |
1da177e4 | 4284 | { |
808f5114 | 4285 | rcu_read_unlock(); |
1da177e4 LT |
4286 | } |
4287 | ||
1ce4f28b | 4288 | static int packet_seq_show(struct seq_file *seq, void *v) |
1da177e4 LT |
4289 | { |
4290 | if (v == SEQ_START_TOKEN) | |
4291 | seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); | |
4292 | else { | |
b7ceabd9 | 4293 | struct sock *s = sk_entry(v); |
1da177e4 LT |
4294 | const struct packet_sock *po = pkt_sk(s); |
4295 | ||
4296 | seq_printf(seq, | |
71338aa7 | 4297 | "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", |
1da177e4 LT |
4298 | s, |
4299 | atomic_read(&s->sk_refcnt), | |
4300 | s->sk_type, | |
4301 | ntohs(po->num), | |
4302 | po->ifindex, | |
4303 | po->running, | |
4304 | atomic_read(&s->sk_rmem_alloc), | |
a7cb5a49 | 4305 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), |
40d4e3df | 4306 | sock_i_ino(s)); |
1da177e4 LT |
4307 | } |
4308 | ||
4309 | return 0; | |
4310 | } | |
4311 | ||
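The resulting table is exported as /proc/net/packet; a trivial reader sketch:

#include <stdio.h>

static void dump_packet_sockets(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/packet", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* sk RefCnt Type Proto Iface ... */
	fclose(f);
}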
56b3d975 | 4312 | static const struct seq_operations packet_seq_ops = { |
1da177e4 LT |
4313 | .start = packet_seq_start, |
4314 | .next = packet_seq_next, | |
4315 | .stop = packet_seq_stop, | |
4316 | .show = packet_seq_show, | |
4317 | }; | |
4318 | ||
4319 | static int packet_seq_open(struct inode *inode, struct file *file) | |
4320 | { | |
e372c414 DL |
4321 | return seq_open_net(inode, file, &packet_seq_ops, |
4322 | sizeof(struct seq_net_private)); | |
1da177e4 LT |
4323 | } |
4324 | ||
da7071d7 | 4325 | static const struct file_operations packet_seq_fops = { |
1da177e4 LT |
4326 | .owner = THIS_MODULE, |
4327 | .open = packet_seq_open, | |
4328 | .read = seq_read, | |
4329 | .llseek = seq_lseek, | |
e372c414 | 4330 | .release = seq_release_net, |
1da177e4 LT |
4331 | }; |
4332 | ||
4333 | #endif | |
4334 | ||
2c8c1e72 | 4335 | static int __net_init packet_net_init(struct net *net) |
d12d01d6 | 4336 | { |
0fa7fa98 | 4337 | mutex_init(&net->packet.sklist_lock); |
2aaef4e4 | 4338 | INIT_HLIST_HEAD(&net->packet.sklist); |
d12d01d6 | 4339 | |
d4beaa66 | 4340 | if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops)) |
d12d01d6 DL |
4341 | return -ENOMEM; |
4342 | ||
4343 | return 0; | |
4344 | } | |
4345 | ||
2c8c1e72 | 4346 | static void __net_exit packet_net_exit(struct net *net) |
d12d01d6 | 4347 | { |
ece31ffd | 4348 | remove_proc_entry("packet", net->proc_net); |
d12d01d6 DL |
4349 | } |
4350 | ||
4351 | static struct pernet_operations packet_net_ops = { | |
4352 | .init = packet_net_init, | |
4353 | .exit = packet_net_exit, | |
4354 | }; | |
4355 | ||
4356 | ||
1da177e4 LT |
4357 | static void __exit packet_exit(void) |
4358 | { | |
1da177e4 | 4359 | unregister_netdevice_notifier(&packet_netdev_notifier); |
d12d01d6 | 4360 | unregister_pernet_subsys(&packet_net_ops); |
1da177e4 LT |
4361 | sock_unregister(PF_PACKET); |
4362 | proto_unregister(&packet_proto); | |
4363 | } | |
4364 | ||
4365 | static int __init packet_init(void) | |
4366 | { | |
4367 | int rc = proto_register(&packet_proto, 0); | |
4368 | ||
4369 | if (rc != 0) | |
4370 | goto out; | |
4371 | ||
4372 | sock_register(&packet_family_ops); | |
d12d01d6 | 4373 | register_pernet_subsys(&packet_net_ops); |
1da177e4 | 4374 | register_netdevice_notifier(&packet_netdev_notifier); |
1da177e4 LT |
4375 | out: |
4376 | return rc; | |
4377 | } | |
4378 | ||
4379 | module_init(packet_init); | |
4380 | module_exit(packet_exit); | |
4381 | MODULE_LICENSE("GPL"); | |
4382 | MODULE_ALIAS_NETPROTO(PF_PACKET); |