Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * PACKET - implements raw packet sockets. | |
7 | * | |
02c30a84 | 8 | * Authors: Ross Biro |
1da177e4 LT |
9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
10 | * Alan Cox, <gw4pts@gw4pts.ampr.org> | |
11 | * | |
1ce4f28b | 12 | * Fixes: |
1da177e4 LT |
13 | * Alan Cox : verify_area() now used correctly |
14 | * Alan Cox : new skbuff lists, look ma no backlogs! | |
15 | * Alan Cox : tidied skbuff lists. | |
16 | * Alan Cox : Now uses generic datagram routines I | |
17 | * added. Also fixed the peek/read crash | |
18 | * from all old Linux datagram code. | |
19 | * Alan Cox : Uses the improved datagram code. | |
20 | * Alan Cox : Added NULL's for socket options. | |
21 | * Alan Cox : Re-commented the code. | |
22 | * Alan Cox : Use new kernel side addressing | |
23 | * Rob Janssen : Correct MTU usage. | |
24 | * Dave Platt : Counter leaks caused by incorrect | |
25 | * interrupt locking and some slightly | |
26 | * dubious gcc output. Can you read | |
27 | * compiler: it said _VOLATILE_ | |
28 | * Richard Kooijman : Timestamp fixes. | |
29 | * Alan Cox : New buffers. Use sk->mac.raw. | |
30 | * Alan Cox : sendmsg/recvmsg support. | |
31 | * Alan Cox : Protocol setting support | |
32 | * Alexey Kuznetsov : Untied from IPv4 stack. | |
33 | * Cyrus Durgin : Fixed kerneld for kmod. | |
34 | * Michal Ostrowski : Module initialization cleanup. | |
1ce4f28b | 35 | * Ulises Alonso : Frame number limit removal and |
1da177e4 | 36 | * packet_set_ring memory leak. |
0fb375fb EB |
37 | * Eric Biederman : Allow for > 8 byte hardware addresses. |
38 | * The convention is that longer addresses | |
39 | * will simply extend the hardware address | |
1ce4f28b | 40 | * byte arrays at the end of sockaddr_ll |
0fb375fb | 41 | * and packet_mreq. |
69e3c75f | 42 | * Johann Baudy : Added TX RING. |
f6fb8f10 | 43 | * Chetan Loke : Implemented TPACKET_V3 block abstraction |
44 | * layer. | |
45 | * Copyright (C) 2011, <lokec@ccs.neu.edu> | |
46 | * | |
1da177e4 LT |
47 | * |
48 | * This program is free software; you can redistribute it and/or | |
49 | * modify it under the terms of the GNU General Public License | |
50 | * as published by the Free Software Foundation; either version | |
51 | * 2 of the License, or (at your option) any later version. | |
52 | * | |
53 | */ | |
1ce4f28b | 54 | |
1da177e4 | 55 | #include <linux/types.h> |
1da177e4 | 56 | #include <linux/mm.h> |
4fc268d2 | 57 | #include <linux/capability.h> |
1da177e4 LT |
58 | #include <linux/fcntl.h> |
59 | #include <linux/socket.h> | |
60 | #include <linux/in.h> | |
61 | #include <linux/inet.h> | |
62 | #include <linux/netdevice.h> | |
63 | #include <linux/if_packet.h> | |
64 | #include <linux/wireless.h> | |
ffbc6111 | 65 | #include <linux/kernel.h> |
1da177e4 | 66 | #include <linux/kmod.h> |
5a0e3ad6 | 67 | #include <linux/slab.h> |
0e3125c7 | 68 | #include <linux/vmalloc.h> |
457c4cbc | 69 | #include <net/net_namespace.h> |
1da177e4 LT |
70 | #include <net/ip.h> |
71 | #include <net/protocol.h> | |
72 | #include <linux/skbuff.h> | |
73 | #include <net/sock.h> | |
74 | #include <linux/errno.h> | |
75 | #include <linux/timer.h> | |
1da177e4 LT |
76 | #include <asm/uaccess.h> |
77 | #include <asm/ioctls.h> | |
78 | #include <asm/page.h> | |
a1f8e7f7 | 79 | #include <asm/cacheflush.h> |
1da177e4 LT |
80 | #include <asm/io.h> |
81 | #include <linux/proc_fs.h> | |
82 | #include <linux/seq_file.h> | |
83 | #include <linux/poll.h> | |
84 | #include <linux/module.h> | |
85 | #include <linux/init.h> | |
905db440 | 86 | #include <linux/mutex.h> |
05423b24 | 87 | #include <linux/if_vlan.h> |
bfd5f4a3 | 88 | #include <linux/virtio_net.h> |
ed85b565 | 89 | #include <linux/errqueue.h> |
614f60fa | 90 | #include <linux/net_tstamp.h> |
b0138408 | 91 | #include <linux/percpu.h> |
1da177e4 LT |
92 | #ifdef CONFIG_INET |
93 | #include <net/inet_common.h> | |
94 | #endif | |
95 | ||
2787b04b PE |
96 | #include "internal.h" |
97 | ||
1da177e4 LT |
98 | /* |
99 | Assumptions: | |
100 | - if device has no dev->hard_header routine, it adds and removes ll header | |
101 | inside itself. In this case ll header is invisible outside of device, | |
102 | but higher levels still should reserve dev->hard_header_len. | |
103 | Some devices are clever enough to reallocate the skb when the header | |
104 | will not fit into the reserved space (tunnel); others are not | |
105 | (PPP). | |
106 | - packet socket receives packets with pulled ll header, | |
107 | so that SOCK_RAW should push it back. | |
108 | ||
109 | On receive: | |
110 | ----------- | |
111 | ||
112 | Incoming, dev->hard_header!=NULL | |
b0e380b1 ACM |
113 | mac_header -> ll header |
114 | data -> data | |
1da177e4 LT |
115 | |
116 | Outgoing, dev->hard_header!=NULL | |
b0e380b1 ACM |
117 | mac_header -> ll header |
118 | data -> ll header | |
1da177e4 LT |
119 | |
120 | Incoming, dev->hard_header==NULL | |
b0e380b1 ACM |
121 | mac_header -> UNKNOWN position. It is very likely that it points to the ll
122 | header. PPP does this, which is wrong, because it introduces | |
db0c58f9 | 123 | asymmetry between the rx and tx paths. |
b0e380b1 | 124 | data -> data |
1da177e4 LT |
125 | |
126 | Outgoing, dev->hard_header==NULL | |
b0e380b1 ACM |
127 | mac_header -> data. ll header is still not built! |
128 | data -> data | |
1da177e4 LT |
129 | |
130 | Summary | |
131 | If dev->hard_header==NULL we are unlikely to restore sensible ll header. | |
132 | ||
133 | ||
134 | On transmit: | |
135 | ------------ | |
136 | ||
137 | dev->hard_header != NULL | |
b0e380b1 ACM |
138 | mac_header -> ll header |
139 | data -> ll header | |
1da177e4 LT |
140 | |
141 | dev->hard_header == NULL (ll header is added by device, we cannot control it) | |
b0e380b1 ACM |
142 | mac_header -> data |
143 | data -> data | |
1da177e4 LT |
144 | |
145 | We should set nh.raw on output to the correct position; | |
146 | the packet classifier depends on it. | |
147 | */ | |
148 | ||
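The comment block above is the contract this file implements: with SOCK_RAW the link-layer header is part of what the caller sees and builds, while SOCK_DGRAM leaves it to the device. As a hedged illustration of where that choice is made from user space (this helper is not part of af_packet.c), a minimal open looks like:

```c
/* Illustrative sketch, not part of af_packet.c: open a packet socket.
 * SOCK_RAW keeps the ll header in the data; with SOCK_DGRAM the kernel
 * strips/builds it as described above. Requires CAP_NET_RAW.
 */
#include <sys/socket.h>
#include <arpa/inet.h>		/* htons */
#include <linux/if_ether.h>	/* ETH_P_ALL */

int open_packet_socket(int type)	/* SOCK_RAW or SOCK_DGRAM */
{
	return socket(AF_PACKET, type, htons(ETH_P_ALL));
}
```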
1da177e4 LT |
149 | /* Private packet socket structures. */ |
150 | ||
0fb375fb EB |
151 | /* identical to struct packet_mreq except it has |
152 | * a longer address field. | |
153 | */ | |
40d4e3df | 154 | struct packet_mreq_max { |
0fb375fb EB |
155 | int mr_ifindex; |
156 | unsigned short mr_type; | |
157 | unsigned short mr_alen; | |
158 | unsigned char mr_address[MAX_ADDR_LEN]; | |
1da177e4 | 159 | }; |
a2efcfa0 | 160 | |
184f489e DB |
161 | union tpacket_uhdr { |
162 | struct tpacket_hdr *h1; | |
163 | struct tpacket2_hdr *h2; | |
164 | struct tpacket3_hdr *h3; | |
165 | void *raw; | |
166 | }; | |
167 | ||
f6fb8f10 | 168 | static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
69e3c75f JB |
169 | int closing, int tx_ring); |
170 | ||
f6fb8f10 | 171 | #define V3_ALIGNMENT (8) |
172 | ||
bc59ba39 | 173 | #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) |
f6fb8f10 | 174 | |
175 | #define BLK_PLUS_PRIV(sz_of_priv) \ | |
176 | (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) | |
177 | ||
f6fb8f10 | 178 | #define PGV_FROM_VMALLOC 1 |
69e3c75f | 179 | |
f6fb8f10 | 180 | #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) |
181 | #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) | |
182 | #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) | |
183 | #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) | |
184 | #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) | |
185 | #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) | |
186 | #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x))) | |
187 | ||
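The macros above describe how a TPACKET_V3 ring block is laid out: a tpacket_block_desc header, an optional per-block private area (BLK_PLUS_PRIV), then the packets. A hedged user-space sketch of asking for such a ring (not part of this file; the sizes are arbitrary example values):

```c
/* Illustrative sketch, not part of af_packet.c: configure a TPACKET_V3
 * receive ring. PACKET_VERSION must be set before PACKET_RX_RING.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int setup_v3_rx_ring(int fd)
{
	int version = TPACKET_V3;
	struct tpacket_req3 req;

	memset(&req, 0, sizeof(req));
	req.tp_block_size     = 1 << 20;	/* 1 MiB per block */
	req.tp_block_nr       = 8;
	req.tp_frame_size     = 2048;
	req.tp_frame_nr       = (req.tp_block_size / req.tp_frame_size) *
				req.tp_block_nr;
	req.tp_retire_blk_tov = 60;		/* ms; 0 lets the kernel derive it */
	req.tp_sizeof_priv    = 0;		/* per-block private area, see BLK_PLUS_PRIV() */

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)))
		return -1;
	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
}
```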
69e3c75f JB |
188 | struct packet_sock; |
189 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); | |
77f65ebd WB |
190 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
191 | struct packet_type *pt, struct net_device *orig_dev); | |
1da177e4 | 192 | |
f6fb8f10 | 193 | static void *packet_previous_frame(struct packet_sock *po, |
194 | struct packet_ring_buffer *rb, | |
195 | int status); | |
196 | static void packet_increment_head(struct packet_ring_buffer *buff); | |
bc59ba39 | 197 | static int prb_curr_blk_in_use(struct tpacket_kbdq_core *, |
198 | struct tpacket_block_desc *); | |
199 | static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, | |
f6fb8f10 | 200 | struct packet_sock *); |
bc59ba39 | 201 | static void prb_retire_current_block(struct tpacket_kbdq_core *, |
f6fb8f10 | 202 | struct packet_sock *, unsigned int status); |
bc59ba39 | 203 | static int prb_queue_frozen(struct tpacket_kbdq_core *); |
204 | static void prb_open_block(struct tpacket_kbdq_core *, | |
205 | struct tpacket_block_desc *); | |
f6fb8f10 | 206 | static void prb_retire_rx_blk_timer_expired(unsigned long); |
bc59ba39 | 207 | static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *); |
208 | static void prb_init_blk_timer(struct packet_sock *, | |
209 | struct tpacket_kbdq_core *, | |
210 | void (*func) (unsigned long)); | |
211 | static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); | |
212 | static void prb_clear_rxhash(struct tpacket_kbdq_core *, | |
213 | struct tpacket3_hdr *); | |
214 | static void prb_fill_vlan_info(struct tpacket_kbdq_core *, | |
215 | struct tpacket3_hdr *); | |
1da177e4 LT |
216 | static void packet_flush_mclist(struct sock *sk); |
217 | ||
ffbc6111 HX |
218 | struct packet_skb_cb { |
219 | unsigned int origlen; | |
220 | union { | |
221 | struct sockaddr_pkt pkt; | |
222 | struct sockaddr_ll ll; | |
223 | } sa; | |
224 | }; | |
225 | ||
226 | #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) | |
8dc41944 | 227 | |
bc59ba39 | 228 | #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) |
f6fb8f10 | 229 | #define GET_PBLOCK_DESC(x, bid) \ |
bc59ba39 | 230 | ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) |
f6fb8f10 | 231 | #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ |
bc59ba39 | 232 | ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) |
f6fb8f10 | 233 | #define GET_NEXT_PRB_BLK_NUM(x) \ |
234 | (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ | |
235 | ((x)->kactive_blk_num+1) : 0) | |
236 | ||
dc99f600 DM |
237 | static void __fanout_unlink(struct sock *sk, struct packet_sock *po); |
238 | static void __fanout_link(struct sock *sk, struct packet_sock *po); | |
239 | ||
d346a3fa DB |
240 | static int packet_direct_xmit(struct sk_buff *skb) |
241 | { | |
242 | struct net_device *dev = skb->dev; | |
d346a3fa DB |
243 | netdev_features_t features; |
244 | struct netdev_queue *txq; | |
43279500 | 245 | int ret = NETDEV_TX_BUSY; |
d346a3fa DB |
246 | |
247 | if (unlikely(!netif_running(dev) || | |
43279500 DB |
248 | !netif_carrier_ok(dev))) |
249 | goto drop; | |
d346a3fa DB |
250 | |
251 | features = netif_skb_features(skb); | |
252 | if (skb_needs_linearize(skb, features) && | |
43279500 DB |
253 | __skb_linearize(skb)) |
254 | goto drop; | |
d346a3fa | 255 | |
10c51b56 | 256 | txq = skb_get_tx_queue(dev, skb); |
d346a3fa | 257 | |
43279500 DB |
258 | local_bh_disable(); |
259 | ||
260 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | |
10b3ad8c | 261 | if (!netif_xmit_frozen_or_drv_stopped(txq)) |
fa2dbdc2 | 262 | ret = netdev_start_xmit(skb, dev, txq, false); |
43279500 | 263 | HARD_TX_UNLOCK(dev, txq); |
d346a3fa | 264 | |
43279500 DB |
265 | local_bh_enable(); |
266 | ||
267 | if (!dev_xmit_complete(ret)) | |
d346a3fa | 268 | kfree_skb(skb); |
43279500 | 269 | |
d346a3fa | 270 | return ret; |
43279500 | 271 | drop: |
0f97ede4 | 272 | atomic_long_inc(&dev->tx_dropped); |
43279500 DB |
273 | kfree_skb(skb); |
274 | return NET_XMIT_DROP; | |
d346a3fa DB |
275 | } |
276 | ||
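packet_direct_xmit() above hands the skb straight to the driver's queue, skipping the qdisc layer. A hedged sketch of the user-space opt-in that selects this path (assuming the PACKET_QDISC_BYPASS socket option; the helper itself is not part of af_packet.c):

```c
/* Illustrative sketch, not part of af_packet.c: bypass the qdisc layer so
 * transmissions go through packet_direct_xmit().
 */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int enable_qdisc_bypass(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
			  &one, sizeof(one));
}
```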
66e56cd4 DB |
277 | static struct net_device *packet_cached_dev_get(struct packet_sock *po) |
278 | { | |
279 | struct net_device *dev; | |
280 | ||
281 | rcu_read_lock(); | |
282 | dev = rcu_dereference(po->cached_dev); | |
283 | if (likely(dev)) | |
284 | dev_hold(dev); | |
285 | rcu_read_unlock(); | |
286 | ||
287 | return dev; | |
288 | } | |
289 | ||
290 | static void packet_cached_dev_assign(struct packet_sock *po, | |
291 | struct net_device *dev) | |
292 | { | |
293 | rcu_assign_pointer(po->cached_dev, dev); | |
294 | } | |
295 | ||
296 | static void packet_cached_dev_reset(struct packet_sock *po) | |
297 | { | |
298 | RCU_INIT_POINTER(po->cached_dev, NULL); | |
299 | } | |
300 | ||
d346a3fa DB |
301 | static bool packet_use_direct_xmit(const struct packet_sock *po) |
302 | { | |
303 | return po->xmit == packet_direct_xmit; | |
304 | } | |
305 | ||
0fd5d57b | 306 | static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) |
d346a3fa | 307 | { |
1cbac010 | 308 | return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; |
d346a3fa DB |
309 | } |
310 | ||
0fd5d57b DB |
311 | static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) |
312 | { | |
313 | const struct net_device_ops *ops = dev->netdev_ops; | |
314 | u16 queue_index; | |
315 | ||
316 | if (ops->ndo_select_queue) { | |
317 | queue_index = ops->ndo_select_queue(dev, skb, NULL, | |
318 | __packet_pick_tx_queue); | |
319 | queue_index = netdev_cap_txqueue(dev, queue_index); | |
320 | } else { | |
321 | queue_index = __packet_pick_tx_queue(dev, skb); | |
322 | } | |
323 | ||
324 | skb_set_queue_mapping(skb, queue_index); | |
325 | } | |
326 | ||
ce06b03e DM |
327 | /* register_prot_hook must be invoked with the po->bind_lock held, |
328 | * or from a context in which asynchronous accesses to the packet | |
329 | * socket is not possible (packet_create()). | |
330 | */ | |
331 | static void register_prot_hook(struct sock *sk) | |
332 | { | |
333 | struct packet_sock *po = pkt_sk(sk); | |
e40526cb | 334 | |
ce06b03e | 335 | if (!po->running) { |
66e56cd4 | 336 | if (po->fanout) |
dc99f600 | 337 | __fanout_link(sk, po); |
66e56cd4 | 338 | else |
dc99f600 | 339 | dev_add_pack(&po->prot_hook); |
e40526cb | 340 | |
ce06b03e DM |
341 | sock_hold(sk); |
342 | po->running = 1; | |
343 | } | |
344 | } | |
345 | ||
346 | /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock | |
347 | * held. If the sync parameter is true, we will temporarily drop | |
348 | * the po->bind_lock and do a synchronize_net to make sure no | |
349 | * asynchronous packet processing paths still refer to the elements | |
350 | * of po->prot_hook. If the sync parameter is false, it is the | |
351 | * caller's responsibility to take care of this. | |
352 | */ | |
353 | static void __unregister_prot_hook(struct sock *sk, bool sync) | |
354 | { | |
355 | struct packet_sock *po = pkt_sk(sk); | |
356 | ||
357 | po->running = 0; | |
66e56cd4 DB |
358 | |
359 | if (po->fanout) | |
dc99f600 | 360 | __fanout_unlink(sk, po); |
66e56cd4 | 361 | else |
dc99f600 | 362 | __dev_remove_pack(&po->prot_hook); |
e40526cb | 363 | |
ce06b03e DM |
364 | __sock_put(sk); |
365 | ||
366 | if (sync) { | |
367 | spin_unlock(&po->bind_lock); | |
368 | synchronize_net(); | |
369 | spin_lock(&po->bind_lock); | |
370 | } | |
371 | } | |
372 | ||
373 | static void unregister_prot_hook(struct sock *sk, bool sync) | |
374 | { | |
375 | struct packet_sock *po = pkt_sk(sk); | |
376 | ||
377 | if (po->running) | |
378 | __unregister_prot_hook(sk, sync); | |
379 | } | |
380 | ||
6e58040b | 381 | static inline struct page * __pure pgv_to_page(void *addr) |
0af55bb5 CG |
382 | { |
383 | if (is_vmalloc_addr(addr)) | |
384 | return vmalloc_to_page(addr); | |
385 | return virt_to_page(addr); | |
386 | } | |
387 | ||
69e3c75f | 388 | static void __packet_set_status(struct packet_sock *po, void *frame, int status) |
1da177e4 | 389 | { |
184f489e | 390 | union tpacket_uhdr h; |
1da177e4 | 391 | |
69e3c75f | 392 | h.raw = frame; |
bbd6ef87 PM |
393 | switch (po->tp_version) { |
394 | case TPACKET_V1: | |
69e3c75f | 395 | h.h1->tp_status = status; |
0af55bb5 | 396 | flush_dcache_page(pgv_to_page(&h.h1->tp_status)); |
bbd6ef87 PM |
397 | break; |
398 | case TPACKET_V2: | |
69e3c75f | 399 | h.h2->tp_status = status; |
0af55bb5 | 400 | flush_dcache_page(pgv_to_page(&h.h2->tp_status)); |
bbd6ef87 | 401 | break; |
f6fb8f10 | 402 | case TPACKET_V3: |
69e3c75f | 403 | default: |
f6fb8f10 | 404 | WARN(1, "TPACKET version not supported.\n"); |
69e3c75f | 405 | BUG(); |
bbd6ef87 | 406 | } |
69e3c75f JB |
407 | |
408 | smp_wmb(); | |
bbd6ef87 PM |
409 | } |
410 | ||
69e3c75f | 411 | static int __packet_get_status(struct packet_sock *po, void *frame) |
bbd6ef87 | 412 | { |
184f489e | 413 | union tpacket_uhdr h; |
bbd6ef87 | 414 | |
69e3c75f JB |
415 | smp_rmb(); |
416 | ||
bbd6ef87 PM |
417 | h.raw = frame; |
418 | switch (po->tp_version) { | |
419 | case TPACKET_V1: | |
0af55bb5 | 420 | flush_dcache_page(pgv_to_page(&h.h1->tp_status)); |
69e3c75f | 421 | return h.h1->tp_status; |
bbd6ef87 | 422 | case TPACKET_V2: |
0af55bb5 | 423 | flush_dcache_page(pgv_to_page(&h.h2->tp_status)); |
69e3c75f | 424 | return h.h2->tp_status; |
f6fb8f10 | 425 | case TPACKET_V3: |
69e3c75f | 426 | default: |
f6fb8f10 | 427 | WARN(1, "TPACKET version not supported.\n"); |
69e3c75f JB |
428 | BUG(); |
429 | return 0; | |
bbd6ef87 | 430 | } |
1da177e4 | 431 | } |
69e3c75f | 432 | |
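__packet_set_status() and __packet_get_status() are the kernel half of the tp_status handshake on each ring frame. A hedged sketch of the user-space half for a TPACKET_V2 ring (illustrative only, not part of af_packet.c):

```c
/* Illustrative sketch, not part of af_packet.c: wait for a frame the kernel
 * marked TP_STATUS_USER, consume it, then return it with TP_STATUS_KERNEL.
 */
#include <linux/if_packet.h>

static void consume_v2_frame(struct tpacket2_hdr *hdr)
{
	while (!(hdr->tp_status & TP_STATUS_USER))
		;			/* or poll() on the socket instead of spinning */

	/* ... frame data: tp_len bytes starting at (char *)hdr + hdr->tp_mac ... */

	__sync_synchronize();		/* finish reading before releasing the slot */
	hdr->tp_status = TP_STATUS_KERNEL;
}
```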
b9c32fb2 DB |
433 | static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts, |
434 | unsigned int flags) | |
7a51384c DB |
435 | { |
436 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | |
437 | ||
68a360e8 WB |
438 | if (shhwtstamps && |
439 | (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && | |
440 | ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts)) | |
441 | return TP_STATUS_TS_RAW_HARDWARE; | |
7a51384c DB |
442 | |
443 | if (ktime_to_timespec_cond(skb->tstamp, ts)) | |
b9c32fb2 | 444 | return TP_STATUS_TS_SOFTWARE; |
7a51384c | 445 | |
b9c32fb2 | 446 | return 0; |
7a51384c DB |
447 | } |
448 | ||
b9c32fb2 DB |
449 | static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, |
450 | struct sk_buff *skb) | |
2e31396f WB |
451 | { |
452 | union tpacket_uhdr h; | |
453 | struct timespec ts; | |
b9c32fb2 | 454 | __u32 ts_status; |
2e31396f | 455 | |
b9c32fb2 DB |
456 | if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) |
457 | return 0; | |
2e31396f WB |
458 | |
459 | h.raw = frame; | |
460 | switch (po->tp_version) { | |
461 | case TPACKET_V1: | |
462 | h.h1->tp_sec = ts.tv_sec; | |
463 | h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; | |
464 | break; | |
465 | case TPACKET_V2: | |
466 | h.h2->tp_sec = ts.tv_sec; | |
467 | h.h2->tp_nsec = ts.tv_nsec; | |
468 | break; | |
469 | case TPACKET_V3: | |
470 | default: | |
471 | WARN(1, "TPACKET version not supported.\n"); | |
472 | BUG(); | |
473 | } | |
474 | ||
475 | /* one flush is safe, as both fields always lie on the same cacheline */ | |
476 | flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); | |
477 | smp_wmb(); | |
b9c32fb2 DB |
478 | |
479 | return ts_status; | |
2e31396f WB |
480 | } |
481 | ||
69e3c75f JB |
482 | static void *packet_lookup_frame(struct packet_sock *po, |
483 | struct packet_ring_buffer *rb, | |
484 | unsigned int position, | |
485 | int status) | |
486 | { | |
487 | unsigned int pg_vec_pos, frame_offset; | |
184f489e | 488 | union tpacket_uhdr h; |
69e3c75f JB |
489 | |
490 | pg_vec_pos = position / rb->frames_per_block; | |
491 | frame_offset = position % rb->frames_per_block; | |
492 | ||
0e3125c7 NH |
493 | h.raw = rb->pg_vec[pg_vec_pos].buffer + |
494 | (frame_offset * rb->frame_size); | |
69e3c75f JB |
495 | |
496 | if (status != __packet_get_status(po, h.raw)) | |
497 | return NULL; | |
498 | ||
499 | return h.raw; | |
500 | } | |
501 | ||
eea49cc9 | 502 | static void *packet_current_frame(struct packet_sock *po, |
69e3c75f JB |
503 | struct packet_ring_buffer *rb, |
504 | int status) | |
505 | { | |
506 | return packet_lookup_frame(po, rb, rb->head, status); | |
507 | } | |
508 | ||
bc59ba39 | 509 | static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) |
f6fb8f10 | 510 | { |
511 | del_timer_sync(&pkc->retire_blk_timer); | |
512 | } | |
513 | ||
514 | static void prb_shutdown_retire_blk_timer(struct packet_sock *po, | |
515 | int tx_ring, | |
516 | struct sk_buff_head *rb_queue) | |
517 | { | |
bc59ba39 | 518 | struct tpacket_kbdq_core *pkc; |
f6fb8f10 | 519 | |
22781a5b DJ |
520 | pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) : |
521 | GET_PBDQC_FROM_RB(&po->rx_ring); | |
f6fb8f10 | 522 | |
ec6f809f | 523 | spin_lock_bh(&rb_queue->lock); |
f6fb8f10 | 524 | pkc->delete_blk_timer = 1; |
ec6f809f | 525 | spin_unlock_bh(&rb_queue->lock); |
f6fb8f10 | 526 | |
527 | prb_del_retire_blk_timer(pkc); | |
528 | } | |
529 | ||
530 | static void prb_init_blk_timer(struct packet_sock *po, | |
bc59ba39 | 531 | struct tpacket_kbdq_core *pkc, |
f6fb8f10 | 532 | void (*func) (unsigned long)) |
533 | { | |
534 | init_timer(&pkc->retire_blk_timer); | |
535 | pkc->retire_blk_timer.data = (long)po; | |
536 | pkc->retire_blk_timer.function = func; | |
537 | pkc->retire_blk_timer.expires = jiffies; | |
538 | } | |
539 | ||
540 | static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring) | |
541 | { | |
bc59ba39 | 542 | struct tpacket_kbdq_core *pkc; |
f6fb8f10 | 543 | |
544 | if (tx_ring) | |
545 | BUG(); | |
546 | ||
22781a5b DJ |
547 | pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) : |
548 | GET_PBDQC_FROM_RB(&po->rx_ring); | |
f6fb8f10 | 549 | prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); |
550 | } | |
551 | ||
552 | static int prb_calc_retire_blk_tmo(struct packet_sock *po, | |
553 | int blk_size_in_bytes) | |
554 | { | |
555 | struct net_device *dev; | |
556 | unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; | |
4bc71cb9 JP |
557 | struct ethtool_cmd ecmd; |
558 | int err; | |
e440cf2c | 559 | u32 speed; |
f6fb8f10 | 560 | |
4bc71cb9 JP |
561 | rtnl_lock(); |
562 | dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); | |
563 | if (unlikely(!dev)) { | |
564 | rtnl_unlock(); | |
f6fb8f10 | 565 | return DEFAULT_PRB_RETIRE_TOV; |
4bc71cb9 JP |
566 | } |
567 | err = __ethtool_get_settings(dev, &ecmd); | |
e440cf2c | 568 | speed = ethtool_cmd_speed(&ecmd); |
4bc71cb9 JP |
569 | rtnl_unlock(); |
570 | if (!err) { | |
4bc71cb9 JP |
571 | /* |
572 | * If the link speed is so slow, you don't really | |
573 | * need to worry about perf anyway. | |
574 | */ | |
e440cf2c | 575 | if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) { |
4bc71cb9 | 576 | return DEFAULT_PRB_RETIRE_TOV; |
e440cf2c | 577 | } else { |
578 | msec = 1; | |
579 | div = speed / 1000; | |
f6fb8f10 | 580 | } |
581 | } | |
582 | ||
583 | mbits = (blk_size_in_bytes * 8) / (1024 * 1024); | |
584 | ||
585 | if (div) | |
586 | mbits /= div; | |
587 | ||
588 | tmo = mbits * msec; | |
589 | ||
590 | if (div) | |
591 | return tmo+1; | |
592 | return tmo; | |
593 | } | |
594 | ||
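For concreteness, a worked pass through the arithmetic above (an illustrative example, not taken from the source): with a 1 MiB block, mbits = (2^20 * 8) / (1024 * 1024) = 8. On a 1 Gb/s link, speed is 1000, so msec = 1 and div = 1, giving tmo = 8 and a returned timeout of tmo + 1 = 9 ms, which matches the "~8 ms to fill a block" figure quoted in the timer-logic comment below. On a 10 Gb/s link, div = 10 and the integer division collapses mbits to 0, so the function returns the 1 ms floor.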
bc59ba39 | 595 | static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, |
f6fb8f10 | 596 | union tpacket_req_u *req_u) |
597 | { | |
598 | p1->feature_req_word = req_u->req3.tp_feature_req_word; | |
599 | } | |
600 | ||
601 | static void init_prb_bdqc(struct packet_sock *po, | |
602 | struct packet_ring_buffer *rb, | |
603 | struct pgv *pg_vec, | |
604 | union tpacket_req_u *req_u, int tx_ring) | |
605 | { | |
22781a5b | 606 | struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); |
bc59ba39 | 607 | struct tpacket_block_desc *pbd; |
f6fb8f10 | 608 | |
609 | memset(p1, 0x0, sizeof(*p1)); | |
610 | ||
611 | p1->knxt_seq_num = 1; | |
612 | p1->pkbdq = pg_vec; | |
bc59ba39 | 613 | pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; |
e3192690 | 614 | p1->pkblk_start = pg_vec[0].buffer; |
f6fb8f10 | 615 | p1->kblk_size = req_u->req3.tp_block_size; |
616 | p1->knum_blocks = req_u->req3.tp_block_nr; | |
617 | p1->hdrlen = po->tp_hdrlen; | |
618 | p1->version = po->tp_version; | |
619 | p1->last_kactive_blk_num = 0; | |
ee80fbf3 | 620 | po->stats.stats3.tp_freeze_q_cnt = 0; |
f6fb8f10 | 621 | if (req_u->req3.tp_retire_blk_tov) |
622 | p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; | |
623 | else | |
624 | p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, | |
625 | req_u->req3.tp_block_size); | |
626 | p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); | |
627 | p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; | |
628 | ||
dc808110 | 629 | p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); |
f6fb8f10 | 630 | prb_init_ft_ops(p1, req_u); |
631 | prb_setup_retire_blk_timer(po, tx_ring); | |
632 | prb_open_block(p1, pbd); | |
633 | } | |
634 | ||
635 | /* Do NOT update the last_blk_num first. | |
636 | * Assumes sk_buff_head lock is held. | |
637 | */ | |
bc59ba39 | 638 | static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) |
f6fb8f10 | 639 | { |
640 | mod_timer(&pkc->retire_blk_timer, | |
641 | jiffies + pkc->tov_in_jiffies); | |
642 | pkc->last_kactive_blk_num = pkc->kactive_blk_num; | |
643 | } | |
644 | ||
645 | /* | |
646 | * Timer logic: | |
647 | * 1) We refresh the timer only when we open a block. | |
648 | * By doing this we don't waste cycles refreshing the timer | |
649 | * on packet-by-packet basis. | |
650 | * | |
651 | * With a 1MB block-size, on a 1Gbps line, it will take | |
652 | * i) ~8 ms to fill a block + ii) memcpy etc. | |
653 | * In this cut we are not accounting for the memcpy time. | |
654 | * | |
655 | * So, if the user sets the 'tmo' to 10ms then the timer | |
656 | * will never fire while the block is still getting filled | |
657 | * (which is what we want). However, the user could choose | |
658 | * to close a block early and that's fine. | |
659 | * | |
660 | * But when the timer does fire, we check whether or not to refresh it. | |
661 | * Since the tmo granularity is in msecs, it is not too expensive | |
662 | * to refresh the timer, let's say every '8' msecs. | |
663 | * Either the user can set the 'tmo' or we can derive it based on | |
664 | * a) line-speed and b) block-size. | |
665 | * prb_calc_retire_blk_tmo() calculates the tmo. | |
666 | * | |
667 | */ | |
668 | static void prb_retire_rx_blk_timer_expired(unsigned long data) | |
669 | { | |
670 | struct packet_sock *po = (struct packet_sock *)data; | |
22781a5b | 671 | struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); |
f6fb8f10 | 672 | unsigned int frozen; |
bc59ba39 | 673 | struct tpacket_block_desc *pbd; |
f6fb8f10 | 674 | |
675 | spin_lock(&po->sk.sk_receive_queue.lock); | |
676 | ||
677 | frozen = prb_queue_frozen(pkc); | |
678 | pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); | |
679 | ||
680 | if (unlikely(pkc->delete_blk_timer)) | |
681 | goto out; | |
682 | ||
683 | /* We only need to plug the race when the block is partially filled. | |
684 | * tpacket_rcv: | |
685 | * lock(); increment BLOCK_NUM_PKTS; unlock() | |
686 | * copy_bits() is in progress ... | |
687 | * timer fires on other cpu: | |
688 | * we can't retire the current block because copy_bits | |
689 | * is in progress. | |
690 | * | |
691 | */ | |
692 | if (BLOCK_NUM_PKTS(pbd)) { | |
693 | while (atomic_read(&pkc->blk_fill_in_prog)) { | |
694 | /* Waiting for skb_copy_bits to finish... */ | |
695 | cpu_relax(); | |
696 | } | |
697 | } | |
698 | ||
699 | if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { | |
700 | if (!frozen) { | |
701 | prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); | |
702 | if (!prb_dispatch_next_block(pkc, po)) | |
703 | goto refresh_timer; | |
704 | else | |
705 | goto out; | |
706 | } else { | |
707 | /* Case 1. Queue was frozen because user-space was | |
708 | * lagging behind. | |
709 | */ | |
710 | if (prb_curr_blk_in_use(pkc, pbd)) { | |
711 | /* | |
712 | * Ok, user-space is still behind. | |
713 | * So just refresh the timer. | |
714 | */ | |
715 | goto refresh_timer; | |
716 | } else { | |
717 | /* Case 2. Queue was frozen, user-space caught up, | |
718 | * now the link went idle && the timer fired. | |
719 | * We don't have a block to close. So we open this | |
720 | * block and restart the timer. | |
721 | * Opening a block thaws the queue and restarts the timer. | |
722 | * Thawing/timer-refresh is a side effect. | |
723 | */ | |
724 | prb_open_block(pkc, pbd); | |
725 | goto out; | |
726 | } | |
727 | } | |
728 | } | |
729 | ||
730 | refresh_timer: | |
731 | _prb_refresh_rx_retire_blk_timer(pkc); | |
732 | ||
733 | out: | |
734 | spin_unlock(&po->sk.sk_receive_queue.lock); | |
735 | } | |
736 | ||
eea49cc9 | 737 | static void prb_flush_block(struct tpacket_kbdq_core *pkc1, |
bc59ba39 | 738 | struct tpacket_block_desc *pbd1, __u32 status) |
f6fb8f10 | 739 | { |
740 | /* Flush everything minus the block header */ | |
741 | ||
742 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 | |
743 | u8 *start, *end; | |
744 | ||
745 | start = (u8 *)pbd1; | |
746 | ||
747 | /* Skip the block header (we know the header WILL fit in 4K) | |
748 | start += PAGE_SIZE; | |
749 | ||
750 | end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); | |
751 | for (; start < end; start += PAGE_SIZE) | |
752 | flush_dcache_page(pgv_to_page(start)); | |
753 | ||
754 | smp_wmb(); | |
755 | #endif | |
756 | ||
757 | /* Now update the block status. */ | |
758 | ||
759 | BLOCK_STATUS(pbd1) = status; | |
760 | ||
761 | /* Flush the block header */ | |
762 | ||
763 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 | |
764 | start = (u8 *)pbd1; | |
765 | flush_dcache_page(pgv_to_page(start)); | |
766 | ||
767 | smp_wmb(); | |
768 | #endif | |
769 | } | |
770 | ||
771 | /* | |
772 | * Side effect: | |
773 | * | |
774 | * 1) flush the block | |
775 | * 2) Increment active_blk_num | |
776 | * | |
777 | * Note: We DON'T refresh the timer on purpose, | |
778 | * because the next block will almost always be opened. | |
779 | */ | |
bc59ba39 | 780 | static void prb_close_block(struct tpacket_kbdq_core *pkc1, |
781 | struct tpacket_block_desc *pbd1, | |
f6fb8f10 | 782 | struct packet_sock *po, unsigned int stat) |
783 | { | |
784 | __u32 status = TP_STATUS_USER | stat; | |
785 | ||
786 | struct tpacket3_hdr *last_pkt; | |
bc59ba39 | 787 | struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; |
da413eec | 788 | struct sock *sk = &po->sk; |
f6fb8f10 | 789 | |
ee80fbf3 | 790 | if (po->stats.stats3.tp_drops) |
f6fb8f10 | 791 | status |= TP_STATUS_LOSING; |
792 | ||
793 | last_pkt = (struct tpacket3_hdr *)pkc1->prev; | |
794 | last_pkt->tp_next_offset = 0; | |
795 | ||
796 | /* Get the ts of the last pkt */ | |
797 | if (BLOCK_NUM_PKTS(pbd1)) { | |
798 | h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; | |
799 | h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; | |
800 | } else { | |
801 | /* Ok, we tmo'd - so get the current time */ | |
802 | struct timespec ts; | |
803 | getnstimeofday(&ts); | |
804 | h1->ts_last_pkt.ts_sec = ts.tv_sec; | |
805 | h1->ts_last_pkt.ts_nsec = ts.tv_nsec; | |
806 | } | |
807 | ||
808 | smp_wmb(); | |
809 | ||
810 | /* Flush the block */ | |
811 | prb_flush_block(pkc1, pbd1, status); | |
812 | ||
da413eec DC |
813 | sk->sk_data_ready(sk); |
814 | ||
f6fb8f10 | 815 | pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); |
816 | } | |
817 | ||
eea49cc9 | 818 | static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) |
f6fb8f10 | 819 | { |
820 | pkc->reset_pending_on_curr_blk = 0; | |
821 | } | |
822 | ||
823 | /* | |
824 | * Side effect of opening a block: | |
825 | * | |
826 | * 1) prb_queue is thawed. | |
827 | * 2) retire_blk_timer is refreshed. | |
828 | * | |
829 | */ | |
bc59ba39 | 830 | static void prb_open_block(struct tpacket_kbdq_core *pkc1, |
831 | struct tpacket_block_desc *pbd1) | |
f6fb8f10 | 832 | { |
833 | struct timespec ts; | |
bc59ba39 | 834 | struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; |
f6fb8f10 | 835 | |
836 | smp_rmb(); | |
837 | ||
8da3056c DB |
838 | /* We could have just memset this, but we would lose the
839 | * flexibility of making the priv area sticky. | |
840 | */ | |
f6fb8f10 | 841 | |
8da3056c DB |
842 | BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; |
843 | BLOCK_NUM_PKTS(pbd1) = 0; | |
844 | BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); | |
f6fb8f10 | 845 | |
8da3056c DB |
846 | getnstimeofday(&ts); |
847 | ||
848 | h1->ts_first_pkt.ts_sec = ts.tv_sec; | |
849 | h1->ts_first_pkt.ts_nsec = ts.tv_nsec; | |
f6fb8f10 | 850 | |
8da3056c DB |
851 | pkc1->pkblk_start = (char *)pbd1; |
852 | pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); | |
853 | ||
854 | BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); | |
855 | BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; | |
856 | ||
857 | pbd1->version = pkc1->version; | |
858 | pkc1->prev = pkc1->nxt_offset; | |
859 | pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; | |
860 | ||
861 | prb_thaw_queue(pkc1); | |
862 | _prb_refresh_rx_retire_blk_timer(pkc1); | |
863 | ||
864 | smp_wmb(); | |
f6fb8f10 | 865 | } |
866 | ||
867 | /* | |
868 | * Queue freeze logic: | |
869 | * 1) Assume tp_block_nr = 8 blocks. | |
870 | * 2) At time 't0', user opens Rx ring. | |
871 | * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 | |
872 | * 4) user-space is either sleeping or processing block '0'. | |
873 | * 5) tpacket_rcv is currently filling block '7'; since there is no space left, | |
874 | * it will close block-7, loop around and try to fill block '0'. | |
875 | * call-flow: | |
876 | * __packet_lookup_frame_in_block | |
877 | * prb_retire_current_block() | |
878 | * prb_dispatch_next_block() | |
879 | * |->(BLOCK_STATUS == USER) evaluates to true | |
880 | * 5.1) Since block-0 is currently in-use, we just freeze the queue. | |
881 | * 6) Now there are two cases: | |
882 | * 6.1) Link goes idle right after the queue is frozen. | |
883 | * But remember, the last open_block() refreshed the timer. | |
885 | * When this timer expires, it will refresh itself so that we can | |
885 | * re-open block-0 in near future. | |
886 | * 6.2) Link is busy and keeps on receiving packets. This is a simple | |
887 | * case and __packet_lookup_frame_in_block will check if block-0 | |
888 | * is free and can now be re-used. | |
889 | */ | |
eea49cc9 | 890 | static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, |
f6fb8f10 | 891 | struct packet_sock *po) |
892 | { | |
893 | pkc->reset_pending_on_curr_blk = 1; | |
ee80fbf3 | 894 | po->stats.stats3.tp_freeze_q_cnt++; |
f6fb8f10 | 895 | } |
896 | ||
897 | #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) | |
898 | ||
899 | /* | |
900 | * If the next block is free then we will dispatch it | |
901 | * and return a good offset. | |
902 | * Else, we will freeze the queue. | |
903 | * So, caller must check the return value. | |
904 | */ | |
bc59ba39 | 905 | static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, |
f6fb8f10 | 906 | struct packet_sock *po) |
907 | { | |
bc59ba39 | 908 | struct tpacket_block_desc *pbd; |
f6fb8f10 | 909 | |
910 | smp_rmb(); | |
911 | ||
912 | /* 1. Get current block num */ | |
913 | pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); | |
914 | ||
915 | /* 2. If this block is currently in_use then freeze the queue */ | |
916 | if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { | |
917 | prb_freeze_queue(pkc, po); | |
918 | return NULL; | |
919 | } | |
920 | ||
921 | /* | |
922 | * 3. | |
923 | * open this block and return the offset where the first packet | |
924 | * needs to get stored. | |
925 | */ | |
926 | prb_open_block(pkc, pbd); | |
927 | return (void *)pkc->nxt_offset; | |
928 | } | |
929 | ||
bc59ba39 | 930 | static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, |
f6fb8f10 | 931 | struct packet_sock *po, unsigned int status) |
932 | { | |
bc59ba39 | 933 | struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); |
f6fb8f10 | 934 | |
935 | /* retire/close the current block */ | |
936 | if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { | |
937 | /* | |
938 | * Plug the case where copy_bits() is in progress on | |
939 | * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't | |
940 | * have space to copy the pkt in the current block and | |
941 | * called prb_retire_current_block() | |
942 | * | |
943 | * We don't need to worry about the TMO case because | |
944 | * the timer-handler already handled this case. | |
945 | */ | |
946 | if (!(status & TP_STATUS_BLK_TMO)) { | |
947 | while (atomic_read(&pkc->blk_fill_in_prog)) { | |
948 | /* Waiting for skb_copy_bits to finish... */ | |
949 | cpu_relax(); | |
950 | } | |
951 | } | |
952 | prb_close_block(pkc, pbd, po, status); | |
953 | return; | |
954 | } | |
f6fb8f10 | 955 | } |
956 | ||
eea49cc9 | 957 | static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc, |
bc59ba39 | 958 | struct tpacket_block_desc *pbd) |
f6fb8f10 | 959 | { |
960 | return TP_STATUS_USER & BLOCK_STATUS(pbd); | |
961 | } | |
962 | ||
eea49cc9 | 963 | static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) |
f6fb8f10 | 964 | { |
965 | return pkc->reset_pending_on_curr_blk; | |
966 | } | |
967 | ||
eea49cc9 | 968 | static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) |
f6fb8f10 | 969 | { |
bc59ba39 | 970 | struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); |
f6fb8f10 | 971 | atomic_dec(&pkc->blk_fill_in_prog); |
972 | } | |
973 | ||
eea49cc9 | 974 | static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, |
f6fb8f10 | 975 | struct tpacket3_hdr *ppd) |
976 | { | |
3958afa1 | 977 | ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); |
f6fb8f10 | 978 | } |
979 | ||
eea49cc9 | 980 | static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, |
f6fb8f10 | 981 | struct tpacket3_hdr *ppd) |
982 | { | |
983 | ppd->hv1.tp_rxhash = 0; | |
984 | } | |
985 | ||
eea49cc9 | 986 | static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, |
f6fb8f10 | 987 | struct tpacket3_hdr *ppd) |
988 | { | |
989 | if (vlan_tx_tag_present(pkc->skb)) { | |
990 | ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb); | |
a0cdfcf3 AW |
991 | ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); |
992 | ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | |
f6fb8f10 | 993 | } else { |
9e67030a | 994 | ppd->hv1.tp_vlan_tci = 0; |
a0cdfcf3 | 995 | ppd->hv1.tp_vlan_tpid = 0; |
9e67030a | 996 | ppd->tp_status = TP_STATUS_AVAILABLE; |
f6fb8f10 | 997 | } |
998 | } | |
999 | ||
bc59ba39 | 1000 | static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, |
f6fb8f10 | 1001 | struct tpacket3_hdr *ppd) |
1002 | { | |
a0cdfcf3 | 1003 | ppd->hv1.tp_padding = 0; |
f6fb8f10 | 1004 | prb_fill_vlan_info(pkc, ppd); |
1005 | ||
1006 | if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) | |
1007 | prb_fill_rxhash(pkc, ppd); | |
1008 | else | |
1009 | prb_clear_rxhash(pkc, ppd); | |
1010 | } | |
1011 | ||
eea49cc9 | 1012 | static void prb_fill_curr_block(char *curr, |
bc59ba39 | 1013 | struct tpacket_kbdq_core *pkc, |
1014 | struct tpacket_block_desc *pbd, | |
f6fb8f10 | 1015 | unsigned int len) |
1016 | { | |
1017 | struct tpacket3_hdr *ppd; | |
1018 | ||
1019 | ppd = (struct tpacket3_hdr *)curr; | |
1020 | ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); | |
1021 | pkc->prev = curr; | |
1022 | pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); | |
1023 | BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); | |
1024 | BLOCK_NUM_PKTS(pbd) += 1; | |
1025 | atomic_inc(&pkc->blk_fill_in_prog); | |
1026 | prb_run_all_ft_ops(pkc, ppd); | |
1027 | } | |
1028 | ||
1029 | /* Assumes caller has the sk->rx_queue.lock */ | |
1030 | static void *__packet_lookup_frame_in_block(struct packet_sock *po, | |
1031 | struct sk_buff *skb, | |
1032 | int status, | |
1033 | unsigned int len | |
1034 | ) | |
1035 | { | |
bc59ba39 | 1036 | struct tpacket_kbdq_core *pkc; |
1037 | struct tpacket_block_desc *pbd; | |
f6fb8f10 | 1038 | char *curr, *end; |
1039 | ||
e3192690 | 1040 | pkc = GET_PBDQC_FROM_RB(&po->rx_ring); |
f6fb8f10 | 1041 | pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); |
1042 | ||
1043 | /* Queue is frozen when user space is lagging behind */ | |
1044 | if (prb_queue_frozen(pkc)) { | |
1045 | /* | |
1046 | * Check if the last block, which caused the queue to freeze, | |
1047 | * is still in_use by user-space. | |
1048 | */ | |
1049 | if (prb_curr_blk_in_use(pkc, pbd)) { | |
1050 | /* Can't record this packet */ | |
1051 | return NULL; | |
1052 | } else { | |
1053 | /* | |
1054 | * Ok, the block was released by user-space. | |
1055 | * Now let's open that block. | |
1056 | * Opening a block also thaws the queue. | |
1057 | * Thawing is a side effect. | |
1058 | */ | |
1059 | prb_open_block(pkc, pbd); | |
1060 | } | |
1061 | } | |
1062 | ||
1063 | smp_mb(); | |
1064 | curr = pkc->nxt_offset; | |
1065 | pkc->skb = skb; | |
e3192690 | 1066 | end = (char *)pbd + pkc->kblk_size; |
f6fb8f10 | 1067 | |
1068 | /* first try the current block */ | |
1069 | if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { | |
1070 | prb_fill_curr_block(curr, pkc, pbd, len); | |
1071 | return (void *)curr; | |
1072 | } | |
1073 | ||
1074 | /* Ok, close the current block */ | |
1075 | prb_retire_current_block(pkc, po, 0); | |
1076 | ||
1077 | /* Now, try to dispatch the next block */ | |
1078 | curr = (char *)prb_dispatch_next_block(pkc, po); | |
1079 | if (curr) { | |
1080 | pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); | |
1081 | prb_fill_curr_block(curr, pkc, pbd, len); | |
1082 | return (void *)curr; | |
1083 | } | |
1084 | ||
1085 | /* | |
1086 | * No free blocks are available. user_space hasn't caught up yet. | |
1087 | * Queue was just frozen and now this packet will get dropped. | |
1088 | */ | |
1089 | return NULL; | |
1090 | } | |
1091 | ||
eea49cc9 | 1092 | static void *packet_current_rx_frame(struct packet_sock *po, |
f6fb8f10 | 1093 | struct sk_buff *skb, |
1094 | int status, unsigned int len) | |
1095 | { | |
1096 | char *curr = NULL; | |
1097 | switch (po->tp_version) { | |
1098 | case TPACKET_V1: | |
1099 | case TPACKET_V2: | |
1100 | curr = packet_lookup_frame(po, &po->rx_ring, | |
1101 | po->rx_ring.head, status); | |
1102 | return curr; | |
1103 | case TPACKET_V3: | |
1104 | return __packet_lookup_frame_in_block(po, skb, status, len); | |
1105 | default: | |
1106 | WARN(1, "TPACKET version not supported\n"); | |
1107 | BUG(); | |
99aa3473 | 1108 | return NULL; |
f6fb8f10 | 1109 | } |
1110 | } | |
1111 | ||
eea49cc9 | 1112 | static void *prb_lookup_block(struct packet_sock *po, |
f6fb8f10 | 1113 | struct packet_ring_buffer *rb, |
77f65ebd | 1114 | unsigned int idx, |
f6fb8f10 | 1115 | int status) |
1116 | { | |
bc59ba39 | 1117 | struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); |
77f65ebd | 1118 | struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); |
f6fb8f10 | 1119 | |
1120 | if (status != BLOCK_STATUS(pbd)) | |
1121 | return NULL; | |
1122 | return pbd; | |
1123 | } | |
1124 | ||
eea49cc9 | 1125 | static int prb_previous_blk_num(struct packet_ring_buffer *rb) |
f6fb8f10 | 1126 | { |
1127 | unsigned int prev; | |
1128 | if (rb->prb_bdqc.kactive_blk_num) | |
1129 | prev = rb->prb_bdqc.kactive_blk_num-1; | |
1130 | else | |
1131 | prev = rb->prb_bdqc.knum_blocks-1; | |
1132 | return prev; | |
1133 | } | |
1134 | ||
1135 | /* Assumes caller has held the rx_queue.lock */ | |
eea49cc9 | 1136 | static void *__prb_previous_block(struct packet_sock *po, |
f6fb8f10 | 1137 | struct packet_ring_buffer *rb, |
1138 | int status) | |
1139 | { | |
1140 | unsigned int previous = prb_previous_blk_num(rb); | |
1141 | return prb_lookup_block(po, rb, previous, status); | |
1142 | } | |
1143 | ||
eea49cc9 | 1144 | static void *packet_previous_rx_frame(struct packet_sock *po, |
f6fb8f10 | 1145 | struct packet_ring_buffer *rb, |
1146 | int status) | |
1147 | { | |
1148 | if (po->tp_version <= TPACKET_V2) | |
1149 | return packet_previous_frame(po, rb, status); | |
1150 | ||
1151 | return __prb_previous_block(po, rb, status); | |
1152 | } | |
1153 | ||
eea49cc9 | 1154 | static void packet_increment_rx_head(struct packet_sock *po, |
f6fb8f10 | 1155 | struct packet_ring_buffer *rb) |
1156 | { | |
1157 | switch (po->tp_version) { | |
1158 | case TPACKET_V1: | |
1159 | case TPACKET_V2: | |
1160 | return packet_increment_head(rb); | |
1161 | case TPACKET_V3: | |
1162 | default: | |
1163 | WARN(1, "TPACKET version not supported.\n"); | |
1164 | BUG(); | |
1165 | return; | |
1166 | } | |
1167 | } | |
1168 | ||
eea49cc9 | 1169 | static void *packet_previous_frame(struct packet_sock *po, |
69e3c75f JB |
1170 | struct packet_ring_buffer *rb, |
1171 | int status) | |
1172 | { | |
1173 | unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; | |
1174 | return packet_lookup_frame(po, rb, previous, status); | |
1175 | } | |
1176 | ||
eea49cc9 | 1177 | static void packet_increment_head(struct packet_ring_buffer *buff) |
69e3c75f JB |
1178 | { |
1179 | buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; | |
1180 | } | |
1181 | ||
b0138408 DB |
1182 | static void packet_inc_pending(struct packet_ring_buffer *rb) |
1183 | { | |
1184 | this_cpu_inc(*rb->pending_refcnt); | |
1185 | } | |
1186 | ||
1187 | static void packet_dec_pending(struct packet_ring_buffer *rb) | |
1188 | { | |
1189 | this_cpu_dec(*rb->pending_refcnt); | |
1190 | } | |
1191 | ||
1192 | static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) | |
1193 | { | |
1194 | unsigned int refcnt = 0; | |
1195 | int cpu; | |
1196 | ||
1197 | /* We don't use pending refcount in rx_ring. */ | |
1198 | if (rb->pending_refcnt == NULL) | |
1199 | return 0; | |
1200 | ||
1201 | for_each_possible_cpu(cpu) | |
1202 | refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); | |
1203 | ||
1204 | return refcnt; | |
1205 | } | |
1206 | ||
1207 | static int packet_alloc_pending(struct packet_sock *po) | |
1208 | { | |
1209 | po->rx_ring.pending_refcnt = NULL; | |
1210 | ||
1211 | po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); | |
1212 | if (unlikely(po->tx_ring.pending_refcnt == NULL)) | |
1213 | return -ENOBUFS; | |
1214 | ||
1215 | return 0; | |
1216 | } | |
1217 | ||
1218 | static void packet_free_pending(struct packet_sock *po) | |
1219 | { | |
1220 | free_percpu(po->tx_ring.pending_refcnt); | |
1221 | } | |
1222 | ||
77f65ebd WB |
1223 | static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) |
1224 | { | |
1225 | struct sock *sk = &po->sk; | |
1226 | bool has_room; | |
1227 | ||
1228 | if (po->prot_hook.func != tpacket_rcv) | |
1229 | return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize) | |
1230 | <= sk->sk_rcvbuf; | |
1231 | ||
1232 | spin_lock(&sk->sk_receive_queue.lock); | |
1233 | if (po->tp_version == TPACKET_V3) | |
1234 | has_room = prb_lookup_block(po, &po->rx_ring, | |
1235 | po->rx_ring.prb_bdqc.kactive_blk_num, | |
1236 | TP_STATUS_KERNEL); | |
1237 | else | |
1238 | has_room = packet_lookup_frame(po, &po->rx_ring, | |
1239 | po->rx_ring.head, | |
1240 | TP_STATUS_KERNEL); | |
1241 | spin_unlock(&sk->sk_receive_queue.lock); | |
1242 | ||
1243 | return has_room; | |
1244 | } | |
1245 | ||
1da177e4 LT |
1246 | static void packet_sock_destruct(struct sock *sk) |
1247 | { | |
ed85b565 RC |
1248 | skb_queue_purge(&sk->sk_error_queue); |
1249 | ||
547b792c IJ |
1250 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); |
1251 | WARN_ON(atomic_read(&sk->sk_wmem_alloc)); | |
1da177e4 LT |
1252 | |
1253 | if (!sock_flag(sk, SOCK_DEAD)) { | |
40d4e3df | 1254 | pr_err("Attempt to release alive packet socket: %p\n", sk); |
1da177e4 LT |
1255 | return; |
1256 | } | |
1257 | ||
17ab56a2 | 1258 | sk_refcnt_debug_dec(sk); |
1da177e4 LT |
1259 | } |
1260 | ||
dc99f600 DM |
1261 | static int fanout_rr_next(struct packet_fanout *f, unsigned int num) |
1262 | { | |
1263 | int x = atomic_read(&f->rr_cur) + 1; | |
1264 | ||
1265 | if (x >= num) | |
1266 | x = 0; | |
1267 | ||
1268 | return x; | |
1269 | } | |
1270 | ||
77f65ebd WB |
1271 | static unsigned int fanout_demux_hash(struct packet_fanout *f, |
1272 | struct sk_buff *skb, | |
1273 | unsigned int num) | |
dc99f600 | 1274 | { |
61b905da | 1275 | return reciprocal_scale(skb_get_hash(skb), num); |
dc99f600 DM |
1276 | } |
1277 | ||
77f65ebd WB |
1278 | static unsigned int fanout_demux_lb(struct packet_fanout *f, |
1279 | struct sk_buff *skb, | |
1280 | unsigned int num) | |
dc99f600 DM |
1281 | { |
1282 | int cur, old; | |
1283 | ||
1284 | cur = atomic_read(&f->rr_cur); | |
1285 | while ((old = atomic_cmpxchg(&f->rr_cur, cur, | |
1286 | fanout_rr_next(f, num))) != cur) | |
1287 | cur = old; | |
77f65ebd WB |
1288 | return cur; |
1289 | } | |
1290 | ||
1291 | static unsigned int fanout_demux_cpu(struct packet_fanout *f, | |
1292 | struct sk_buff *skb, | |
1293 | unsigned int num) | |
1294 | { | |
1295 | return smp_processor_id() % num; | |
dc99f600 DM |
1296 | } |
1297 | ||
5df0ddfb DB |
1298 | static unsigned int fanout_demux_rnd(struct packet_fanout *f, |
1299 | struct sk_buff *skb, | |
1300 | unsigned int num) | |
1301 | { | |
f337db64 | 1302 | return prandom_u32_max(num); |
5df0ddfb DB |
1303 | } |
1304 | ||
77f65ebd WB |
1305 | static unsigned int fanout_demux_rollover(struct packet_fanout *f, |
1306 | struct sk_buff *skb, | |
1307 | unsigned int idx, unsigned int skip, | |
1308 | unsigned int num) | |
95ec3eb4 | 1309 | { |
77f65ebd | 1310 | unsigned int i, j; |
95ec3eb4 | 1311 | |
77f65ebd WB |
1312 | i = j = min_t(int, f->next[idx], num - 1); |
1313 | do { | |
1314 | if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) { | |
1315 | if (i != j) | |
1316 | f->next[idx] = i; | |
1317 | return i; | |
1318 | } | |
1319 | if (++i == num) | |
1320 | i = 0; | |
1321 | } while (i != j); | |
1322 | ||
1323 | return idx; | |
1324 | } | |
1325 | ||
2d36097d NH |
1326 | static unsigned int fanout_demux_qm(struct packet_fanout *f, |
1327 | struct sk_buff *skb, | |
1328 | unsigned int num) | |
1329 | { | |
1330 | return skb_get_queue_mapping(skb) % num; | |
1331 | } | |
1332 | ||
77f65ebd WB |
1333 | static bool fanout_has_flag(struct packet_fanout *f, u16 flag) |
1334 | { | |
1335 | return f->flags & (flag >> 8); | |
95ec3eb4 DM |
1336 | } |
1337 | ||
95ec3eb4 DM |
1338 | static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, |
1339 | struct packet_type *pt, struct net_device *orig_dev) | |
dc99f600 DM |
1340 | { |
1341 | struct packet_fanout *f = pt->af_packet_priv; | |
1342 | unsigned int num = f->num_members; | |
1343 | struct packet_sock *po; | |
77f65ebd | 1344 | unsigned int idx; |
dc99f600 DM |
1345 | |
1346 | if (!net_eq(dev_net(dev), read_pnet(&f->net)) || | |
1347 | !num) { | |
1348 | kfree_skb(skb); | |
1349 | return 0; | |
1350 | } | |
1351 | ||
95ec3eb4 DM |
1352 | switch (f->type) { |
1353 | case PACKET_FANOUT_HASH: | |
1354 | default: | |
77f65ebd | 1355 | if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { |
bc416d97 | 1356 | skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); |
95ec3eb4 DM |
1357 | if (!skb) |
1358 | return 0; | |
1359 | } | |
77f65ebd | 1360 | idx = fanout_demux_hash(f, skb, num); |
95ec3eb4 DM |
1361 | break; |
1362 | case PACKET_FANOUT_LB: | |
77f65ebd | 1363 | idx = fanout_demux_lb(f, skb, num); |
95ec3eb4 DM |
1364 | break; |
1365 | case PACKET_FANOUT_CPU: | |
77f65ebd WB |
1366 | idx = fanout_demux_cpu(f, skb, num); |
1367 | break; | |
5df0ddfb DB |
1368 | case PACKET_FANOUT_RND: |
1369 | idx = fanout_demux_rnd(f, skb, num); | |
1370 | break; | |
2d36097d NH |
1371 | case PACKET_FANOUT_QM: |
1372 | idx = fanout_demux_qm(f, skb, num); | |
1373 | break; | |
77f65ebd WB |
1374 | case PACKET_FANOUT_ROLLOVER: |
1375 | idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num); | |
95ec3eb4 | 1376 | break; |
dc99f600 DM |
1377 | } |
1378 | ||
77f65ebd WB |
1379 | po = pkt_sk(f->arr[idx]); |
1380 | if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) && | |
1381 | unlikely(!packet_rcv_has_room(po, skb))) { | |
1382 | idx = fanout_demux_rollover(f, skb, idx, idx, num); | |
1383 | po = pkt_sk(f->arr[idx]); | |
1384 | } | |
dc99f600 DM |
1385 | |
1386 | return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); | |
1387 | } | |
1388 | ||
fff3321d PE |
1389 | DEFINE_MUTEX(fanout_mutex); |
1390 | EXPORT_SYMBOL_GPL(fanout_mutex); | |
dc99f600 DM |
1391 | static LIST_HEAD(fanout_list); |
1392 | ||
1393 | static void __fanout_link(struct sock *sk, struct packet_sock *po) | |
1394 | { | |
1395 | struct packet_fanout *f = po->fanout; | |
1396 | ||
1397 | spin_lock(&f->lock); | |
1398 | f->arr[f->num_members] = sk; | |
1399 | smp_wmb(); | |
1400 | f->num_members++; | |
1401 | spin_unlock(&f->lock); | |
1402 | } | |
1403 | ||
1404 | static void __fanout_unlink(struct sock *sk, struct packet_sock *po) | |
1405 | { | |
1406 | struct packet_fanout *f = po->fanout; | |
1407 | int i; | |
1408 | ||
1409 | spin_lock(&f->lock); | |
1410 | for (i = 0; i < f->num_members; i++) { | |
1411 | if (f->arr[i] == sk) | |
1412 | break; | |
1413 | } | |
1414 | BUG_ON(i >= f->num_members); | |
1415 | f->arr[i] = f->arr[f->num_members - 1]; | |
1416 | f->num_members--; | |
1417 | spin_unlock(&f->lock); | |
1418 | } | |
1419 | ||
d4dd8aee | 1420 | static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) |
c0de08d0 | 1421 | { |
d4dd8aee | 1422 | if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout) |
c0de08d0 EL |
1423 | return true; |
1424 | ||
1425 | return false; | |
1426 | } | |
1427 | ||
7736d33f | 1428 | static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
dc99f600 DM |
1429 | { |
1430 | struct packet_sock *po = pkt_sk(sk); | |
1431 | struct packet_fanout *f, *match; | |
7736d33f | 1432 | u8 type = type_flags & 0xff; |
77f65ebd | 1433 | u8 flags = type_flags >> 8; |
dc99f600 DM |
1434 | int err; |
1435 | ||
1436 | switch (type) { | |
77f65ebd WB |
1437 | case PACKET_FANOUT_ROLLOVER: |
1438 | if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) | |
1439 | return -EINVAL; | |
dc99f600 DM |
1440 | case PACKET_FANOUT_HASH: |
1441 | case PACKET_FANOUT_LB: | |
95ec3eb4 | 1442 | case PACKET_FANOUT_CPU: |
5df0ddfb | 1443 | case PACKET_FANOUT_RND: |
2d36097d | 1444 | case PACKET_FANOUT_QM: |
dc99f600 DM |
1445 | break; |
1446 | default: | |
1447 | return -EINVAL; | |
1448 | } | |
1449 | ||
1450 | if (!po->running) | |
1451 | return -EINVAL; | |
1452 | ||
1453 | if (po->fanout) | |
1454 | return -EALREADY; | |
1455 | ||
1456 | mutex_lock(&fanout_mutex); | |
1457 | match = NULL; | |
1458 | list_for_each_entry(f, &fanout_list, list) { | |
1459 | if (f->id == id && | |
1460 | read_pnet(&f->net) == sock_net(sk)) { | |
1461 | match = f; | |
1462 | break; | |
1463 | } | |
1464 | } | |
afe62c68 | 1465 | err = -EINVAL; |
77f65ebd | 1466 | if (match && match->flags != flags) |
afe62c68 | 1467 | goto out; |
dc99f600 | 1468 | if (!match) { |
afe62c68 | 1469 | err = -ENOMEM; |
dc99f600 | 1470 | match = kzalloc(sizeof(*match), GFP_KERNEL); |
afe62c68 ED |
1471 | if (!match) |
1472 | goto out; | |
1473 | write_pnet(&match->net, sock_net(sk)); | |
1474 | match->id = id; | |
1475 | match->type = type; | |
77f65ebd | 1476 | match->flags = flags; |
afe62c68 ED |
1477 | atomic_set(&match->rr_cur, 0); |
1478 | INIT_LIST_HEAD(&match->list); | |
1479 | spin_lock_init(&match->lock); | |
1480 | atomic_set(&match->sk_ref, 0); | |
1481 | match->prot_hook.type = po->prot_hook.type; | |
1482 | match->prot_hook.dev = po->prot_hook.dev; | |
1483 | match->prot_hook.func = packet_rcv_fanout; | |
1484 | match->prot_hook.af_packet_priv = match; | |
c0de08d0 | 1485 | match->prot_hook.id_match = match_fanout_group; |
afe62c68 ED |
1486 | dev_add_pack(&match->prot_hook); |
1487 | list_add(&match->list, &fanout_list); | |
dc99f600 | 1488 | } |
afe62c68 ED |
1489 | err = -EINVAL; |
1490 | if (match->type == type && | |
1491 | match->prot_hook.type == po->prot_hook.type && | |
1492 | match->prot_hook.dev == po->prot_hook.dev) { | |
1493 | err = -ENOSPC; | |
1494 | if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) { | |
1495 | __dev_remove_pack(&po->prot_hook); | |
1496 | po->fanout = match; | |
1497 | atomic_inc(&match->sk_ref); | |
1498 | __fanout_link(sk, po); | |
1499 | err = 0; | |
dc99f600 DM |
1500 | } |
1501 | } | |
afe62c68 | 1502 | out: |
dc99f600 DM |
1503 | mutex_unlock(&fanout_mutex); |
1504 | return err; | |
1505 | } | |
1506 | ||
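fanout_add() above takes a group id and a type_flags word whose low byte is the fanout type and whose high byte holds the flags. A hedged user-space sketch of joining a group, using the documented PACKET_FANOUT encoding of the id in the low 16 bits and type_flags in the high 16 bits (illustrative only, not part of af_packet.c; the id 42 is arbitrary):

```c
/* Illustrative sketch, not part of af_packet.c: join fanout group 42 with
 * hash demuxing, rolling over to another socket when one has no room.
 */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int join_fanout_group(int fd)
{
	int fanout_arg = 42 | ((PACKET_FANOUT_HASH |
				PACKET_FANOUT_FLAG_ROLLOVER) << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
			  &fanout_arg, sizeof(fanout_arg));
}
```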
1507 | static void fanout_release(struct sock *sk) | |
1508 | { | |
1509 | struct packet_sock *po = pkt_sk(sk); | |
1510 | struct packet_fanout *f; | |
1511 | ||
1512 | f = po->fanout; | |
1513 | if (!f) | |
1514 | return; | |
1515 | ||
fff3321d | 1516 | mutex_lock(&fanout_mutex); |
dc99f600 DM |
1517 | po->fanout = NULL; |
1518 | ||
dc99f600 DM |
1519 | if (atomic_dec_and_test(&f->sk_ref)) { |
1520 | list_del(&f->list); | |
1521 | dev_remove_pack(&f->prot_hook); | |
1522 | kfree(f); | |
1523 | } | |
1524 | mutex_unlock(&fanout_mutex); | |
1525 | } | |
1da177e4 | 1526 | |
90ddc4f0 | 1527 | static const struct proto_ops packet_ops; |
1da177e4 | 1528 | |
90ddc4f0 | 1529 | static const struct proto_ops packet_ops_spkt; |
1da177e4 | 1530 | |
40d4e3df ED |
1531 | static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, |
1532 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1533 | { |
1534 | struct sock *sk; | |
1535 | struct sockaddr_pkt *spkt; | |
1536 | ||
1537 | /* | |
1538 | * When we registered the protocol we saved the socket in the data | |
1539 | * field for just this event. | |
1540 | */ | |
1541 | ||
1542 | sk = pt->af_packet_priv; | |
1ce4f28b | 1543 | |
1da177e4 LT |
1544 | /* |
1545 | * Yank back the headers [hope the device set this | |
1546 | * right or kerboom...] | |
1547 | * | |
1548 | * Incoming packets have ll header pulled, | |
1549 | * push it back. | |
1550 | * | |
98e399f8 | 1551 | * For outgoing ones skb->data == skb_mac_header(skb) |
1da177e4 LT |
1552 | * so that this procedure is a no-op. |
1553 | */ | |
1554 | ||
1555 | if (skb->pkt_type == PACKET_LOOPBACK) | |
1556 | goto out; | |
1557 | ||
09ad9bc7 | 1558 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1559 | goto out; |
1560 | ||
40d4e3df ED |
1561 | skb = skb_share_check(skb, GFP_ATOMIC); |
1562 | if (skb == NULL) | |
1da177e4 LT |
1563 | goto oom; |
1564 | ||
1565 | /* drop any routing info */ | |
adf30907 | 1566 | skb_dst_drop(skb); |
1da177e4 | 1567 | |
84531c24 PO |
1568 | /* drop conntrack reference */ |
1569 | nf_reset(skb); | |
1570 | ||
ffbc6111 | 1571 | spkt = &PACKET_SKB_CB(skb)->sa.pkt; |
1da177e4 | 1572 | |
98e399f8 | 1573 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1574 | |
1575 | /* | |
1576 | * The SOCK_PACKET socket receives _all_ frames. | |
1577 | */ | |
1578 | ||
1579 | spkt->spkt_family = dev->type; | |
1580 | strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); | |
1581 | spkt->spkt_protocol = skb->protocol; | |
1582 | ||
1583 | /* | |
1584 | * Charge the memory to the socket. This is done specifically | |
1585 | * to prevent sockets using all the memory up. | |
1586 | */ | |
1587 | ||
40d4e3df | 1588 | if (sock_queue_rcv_skb(sk, skb) == 0) |
1da177e4 LT |
1589 | return 0; |
1590 | ||
1591 | out: | |
1592 | kfree_skb(skb); | |
1593 | oom: | |
1594 | return 0; | |
1595 | } | |
1596 | ||
1597 | ||
1598 | /* | |
1599 | * Output a raw packet to a device layer. This bypasses all the other | |
1600 | * protocol layers and you must therefore supply it with a complete frame | |
1601 | */ | |
1ce4f28b | 1602 | |
1da177e4 LT |
1603 | static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, |
1604 | struct msghdr *msg, size_t len) | |
1605 | { | |
1606 | struct sock *sk = sock->sk; | |
342dfc30 | 1607 | DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); |
1a35ca80 | 1608 | struct sk_buff *skb = NULL; |
1da177e4 | 1609 | struct net_device *dev; |
40d4e3df | 1610 | __be16 proto = 0; |
1da177e4 | 1611 | int err; |
3bdc0eba | 1612 | int extra_len = 0; |
1ce4f28b | 1613 | |
1da177e4 | 1614 | /* |
1ce4f28b | 1615 | * Get and verify the address. |
1da177e4 LT |
1616 | */ |
1617 | ||
40d4e3df | 1618 | if (saddr) { |
1da177e4 | 1619 | if (msg->msg_namelen < sizeof(struct sockaddr)) |
40d4e3df ED |
1620 | return -EINVAL; |
1621 | if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) | |
1622 | proto = saddr->spkt_protocol; | |
1623 | } else | |
1624 | return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ | |
1da177e4 LT |
1625 | |
1626 | /* | |
1ce4f28b | 1627 | * Find the device first to size check it |
1da177e4 LT |
1628 | */ |
1629 | ||
de74e92a | 1630 | saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; |
1a35ca80 | 1631 | retry: |
654d1f8a ED |
1632 | rcu_read_lock(); |
1633 | dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); | |
1da177e4 LT |
1634 | err = -ENODEV; |
1635 | if (dev == NULL) | |
1636 | goto out_unlock; | |
1ce4f28b | 1637 | |
d5e76b0a DM |
1638 | err = -ENETDOWN; |
1639 | if (!(dev->flags & IFF_UP)) | |
1640 | goto out_unlock; | |
1641 | ||
1da177e4 | 1642 | /* |
40d4e3df ED |
1643 | * You may not queue a frame bigger than the mtu. This is the lowest level |
1644 | * raw protocol and you must do your own fragmentation at this level. | |
1da177e4 | 1645 | */ |
1ce4f28b | 1646 | |
3bdc0eba BG |
1647 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
1648 | if (!netif_supports_nofcs(dev)) { | |
1649 | err = -EPROTONOSUPPORT; | |
1650 | goto out_unlock; | |
1651 | } | |
1652 | extra_len = 4; /* We're doing our own CRC */ | |
1653 | } | |
1654 | ||
1da177e4 | 1655 | err = -EMSGSIZE; |
3bdc0eba | 1656 | if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) |
1da177e4 LT |
1657 | goto out_unlock; |
1658 | ||
1a35ca80 ED |
1659 | if (!skb) { |
1660 | size_t reserved = LL_RESERVED_SPACE(dev); | |
4ce40912 | 1661 | int tlen = dev->needed_tailroom; |
1a35ca80 ED |
1662 | unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; |
1663 | ||
1664 | rcu_read_unlock(); | |
4ce40912 | 1665 | skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); |
1a35ca80 ED |
1666 | if (skb == NULL) |
1667 | return -ENOBUFS; | |
1668 | /* FIXME: Save some space for broken drivers that write a hard | |
1669 | * header at transmission time by themselves. PPP is the notable | |
1670 | * one here. This should really be fixed at the driver level. | |
1671 | */ | |
1672 | skb_reserve(skb, reserved); | |
1673 | skb_reset_network_header(skb); | |
1674 | ||
1675 | /* Try to align data part correctly */ | |
1676 | if (hhlen) { | |
1677 | skb->data -= hhlen; | |
1678 | skb->tail -= hhlen; | |
1679 | if (len < hhlen) | |
1680 | skb_reset_network_header(skb); | |
1681 | } | |
6ce8e9ce | 1682 | err = memcpy_from_msg(skb_put(skb, len), msg, len); |
1a35ca80 ED |
1683 | if (err) |
1684 | goto out_free; | |
1685 | goto retry; | |
1da177e4 LT |
1686 | } |
1687 | ||
3bdc0eba | 1688 | if (len > (dev->mtu + dev->hard_header_len + extra_len)) { |
57f89bfa BG |
1689 | /* Earlier code assumed this would be a VLAN pkt, |
1690 | * double-check this now that we have the actual | |
1691 | * packet in hand. | |
1692 | */ | |
1693 | struct ethhdr *ehdr; | |
1694 | skb_reset_mac_header(skb); | |
1695 | ehdr = eth_hdr(skb); | |
1696 | if (ehdr->h_proto != htons(ETH_P_8021Q)) { | |
1697 | err = -EMSGSIZE; | |
1698 | goto out_unlock; | |
1699 | } | |
1700 | } | |
1a35ca80 | 1701 | |
1da177e4 LT |
1702 | skb->protocol = proto; |
1703 | skb->dev = dev; | |
1704 | skb->priority = sk->sk_priority; | |
2d37a186 | 1705 | skb->mark = sk->sk_mark; |
bf84a010 DB |
1706 | |
1707 | sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); | |
1da177e4 | 1708 | |
3bdc0eba BG |
1709 | if (unlikely(extra_len == 4)) |
1710 | skb->no_fcs = 1; | |
1711 | ||
40893fd0 | 1712 | skb_probe_transport_header(skb, 0); |
c1aad275 | 1713 | |
1da177e4 | 1714 | dev_queue_xmit(skb); |
654d1f8a | 1715 | rcu_read_unlock(); |
40d4e3df | 1716 | return len; |
1da177e4 | 1717 | |
1da177e4 | 1718 | out_unlock: |
654d1f8a | 1719 | rcu_read_unlock(); |
1a35ca80 ED |
1720 | out_free: |
1721 | kfree_skb(skb); | |
1da177e4 LT |
1722 | return err; |
1723 | } | |
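For reference, packet_sendmsg_spkt() is the legacy SOCK_PACKET transmit path: the caller must always pass a sockaddr_pkt naming the output device and must supply a complete link-layer frame, since nothing is prepended here. A hedged userspace sketch; the device name "eth0", the protocol value and the all-zero frame are placeholders:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	/* Obsolete SOCK_PACKET socket; needs CAP_NET_RAW. */
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
	struct sockaddr_pkt spkt;
	unsigned char frame[ETH_ZLEN];		/* dummy, all-zero 60-byte frame */

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	strncpy((char *)spkt.spkt_device, "eth0",
		sizeof(spkt.spkt_device));		/* placeholder device */
	spkt.spkt_protocol = htons(ETH_P_802_3);	/* placeholder protocol */

	memset(frame, 0, sizeof(frame));
	/* The kernel path above requires an address, so use sendto(), not send(). */
	if (sendto(fd, frame, sizeof(frame), 0,
		   (struct sockaddr *)&spkt, sizeof(spkt)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}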
1da177e4 | 1724 | |
eea49cc9 | 1725 | static unsigned int run_filter(const struct sk_buff *skb, |
62ab0812 | 1726 | const struct sock *sk, |
dbcb5855 | 1727 | unsigned int res) |
1da177e4 LT |
1728 | { |
1729 | struct sk_filter *filter; | |
fda9ef5d | 1730 | |
80f8f102 ED |
1731 | rcu_read_lock(); |
1732 | filter = rcu_dereference(sk->sk_filter); | |
dbcb5855 | 1733 | if (filter != NULL) |
0a14842f | 1734 | res = SK_RUN_FILTER(filter, skb); |
80f8f102 | 1735 | rcu_read_unlock(); |
1da177e4 | 1736 | |
dbcb5855 | 1737 | return res; |
1da177e4 LT |
1738 | } | |
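run_filter() applies whatever classic BPF program userspace attached with SO_ATTACH_FILTER: a return of 0 drops the packet, and a smaller nonzero return truncates the snapshot length. As a sketch (not the only way to build such a filter), the following program keeps only ARP frames, snapped to 64 bytes, so everything else is discarded before it is ever queued:

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/filter.h>

int main(void)
{
	/* Classic BPF: load the ethertype, keep ARP (up to 64 bytes), drop the rest. */
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),		/* A = ethertype */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),	/* ARP? */
		BPF_STMT(BPF_RET | BPF_K, 64),				/* yes: snap to 64 bytes */
		BPF_STMT(BPF_RET | BPF_K, 0),				/* no: drop */
	};
	struct sock_fprog prog = {
		.len    = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0) {
		perror("socket/SO_ATTACH_FILTER");
		return 1;
	}
	/* Frames reaching recv*() on fd are now ARP only, truncated to 64 bytes. */
	return 0;
}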
1739 | ||
1740 | /* | |
62ab0812 ED |
1741 | * This function does lazy skb cloning in the hope that most packets |
1742 | * are discarded by BPF. | |
1743 | * | |
1744 | * Note tricky part: we DO mangle shared skb! skb->data, skb->len | |
1745 | * and skb->cb are mangled. It works because (and until) packets | |
1746 | * falling here are owned by current CPU. Output packets are cloned | |
1747 | * by dev_queue_xmit_nit(), input packets are processed by net_bh | |
1748 | * sequentially, so that if we return the skb to its original state on exit, |
1749 | * we will not harm anyone. | |
1da177e4 LT |
1750 | */ |
1751 | ||
40d4e3df ED |
1752 | static int packet_rcv(struct sk_buff *skb, struct net_device *dev, |
1753 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1754 | { |
1755 | struct sock *sk; | |
1756 | struct sockaddr_ll *sll; | |
1757 | struct packet_sock *po; | |
40d4e3df | 1758 | u8 *skb_head = skb->data; |
1da177e4 | 1759 | int skb_len = skb->len; |
dbcb5855 | 1760 | unsigned int snaplen, res; |
1da177e4 LT |
1761 | |
1762 | if (skb->pkt_type == PACKET_LOOPBACK) | |
1763 | goto drop; | |
1764 | ||
1765 | sk = pt->af_packet_priv; | |
1766 | po = pkt_sk(sk); | |
1767 | ||
09ad9bc7 | 1768 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1769 | goto drop; |
1770 | ||
1da177e4 LT |
1771 | skb->dev = dev; |
1772 | ||
3b04ddde | 1773 | if (dev->header_ops) { |
1da177e4 | 1774 | /* The device has an explicit notion of ll header, |
62ab0812 ED |
1775 | * exported to higher levels. |
1776 | * | |
1777 | * Otherwise, the device hides details of its frame | |
1778 | * structure, so that corresponding packet head is | |
1779 | * never delivered to user. | |
1da177e4 LT |
1780 | */ |
1781 | if (sk->sk_type != SOCK_DGRAM) | |
98e399f8 | 1782 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1783 | else if (skb->pkt_type == PACKET_OUTGOING) { |
1784 | /* Special case: outgoing packets have ll header at head */ | |
bbe735e4 | 1785 | skb_pull(skb, skb_network_offset(skb)); |
1da177e4 LT |
1786 | } |
1787 | } | |
1788 | ||
1789 | snaplen = skb->len; | |
1790 | ||
dbcb5855 DM |
1791 | res = run_filter(skb, sk, snaplen); |
1792 | if (!res) | |
fda9ef5d | 1793 | goto drop_n_restore; |
dbcb5855 DM |
1794 | if (snaplen > res) |
1795 | snaplen = res; | |
1da177e4 | 1796 | |
0fd7bac6 | 1797 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) |
1da177e4 LT |
1798 | goto drop_n_acct; |
1799 | ||
1800 | if (skb_shared(skb)) { | |
1801 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | |
1802 | if (nskb == NULL) | |
1803 | goto drop_n_acct; | |
1804 | ||
1805 | if (skb_head != skb->data) { | |
1806 | skb->data = skb_head; | |
1807 | skb->len = skb_len; | |
1808 | } | |
abc4e4fa | 1809 | consume_skb(skb); |
1da177e4 LT |
1810 | skb = nskb; |
1811 | } | |
1812 | ||
ffbc6111 HX |
1813 | BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 > |
1814 | sizeof(skb->cb)); | |
1815 | ||
1816 | sll = &PACKET_SKB_CB(skb)->sa.ll; | |
1da177e4 LT |
1817 | sll->sll_family = AF_PACKET; |
1818 | sll->sll_hatype = dev->type; | |
1819 | sll->sll_protocol = skb->protocol; | |
1820 | sll->sll_pkttype = skb->pkt_type; | |
8032b464 | 1821 | if (unlikely(po->origdev)) |
80feaacb PWJ |
1822 | sll->sll_ifindex = orig_dev->ifindex; |
1823 | else | |
1824 | sll->sll_ifindex = dev->ifindex; | |
1da177e4 | 1825 | |
b95cce35 | 1826 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
1da177e4 | 1827 | |
ffbc6111 | 1828 | PACKET_SKB_CB(skb)->origlen = skb->len; |
8dc41944 | 1829 | |
1da177e4 LT |
1830 | if (pskb_trim(skb, snaplen)) |
1831 | goto drop_n_acct; | |
1832 | ||
1833 | skb_set_owner_r(skb, sk); | |
1834 | skb->dev = NULL; | |
adf30907 | 1835 | skb_dst_drop(skb); |
1da177e4 | 1836 | |
84531c24 PO |
1837 | /* drop conntrack reference */ |
1838 | nf_reset(skb); | |
1839 | ||
1da177e4 | 1840 | spin_lock(&sk->sk_receive_queue.lock); |
ee80fbf3 | 1841 | po->stats.stats1.tp_packets++; |
3b885787 | 1842 | skb->dropcount = atomic_read(&sk->sk_drops); |
1da177e4 LT |
1843 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
1844 | spin_unlock(&sk->sk_receive_queue.lock); | |
676d2369 | 1845 | sk->sk_data_ready(sk); |
1da177e4 LT |
1846 | return 0; |
1847 | ||
1848 | drop_n_acct: | |
7091fbd8 | 1849 | spin_lock(&sk->sk_receive_queue.lock); |
ee80fbf3 | 1850 | po->stats.stats1.tp_drops++; |
7091fbd8 WB |
1851 | atomic_inc(&sk->sk_drops); |
1852 | spin_unlock(&sk->sk_receive_queue.lock); | |
1da177e4 LT |
1853 | |
1854 | drop_n_restore: | |
1855 | if (skb_head != skb->data && skb_shared(skb)) { | |
1856 | skb->data = skb_head; | |
1857 | skb->len = skb_len; | |
1858 | } | |
1859 | drop: | |
ead2ceb0 | 1860 | consume_skb(skb); |
1da177e4 LT |
1861 | return 0; |
1862 | } | |
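Everything packet_rcv() records in the skb's control block above is what a plain (non-mmap) receive later surfaces as the source address. A minimal sketch of the userspace side; with no bind() the socket sees frames from every device in the namespace:

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	unsigned char buf[2048];
	struct sockaddr_ll from;
	socklen_t fromlen = sizeof(from);
	ssize_t n;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* One datagram; packet_rcv() filled in the sockaddr_ll metadata. */
	n = recvfrom(fd, buf, sizeof(buf), 0,
		     (struct sockaddr *)&from, &fromlen);
	if (n < 0) {
		perror("recvfrom");
		return 1;
	}

	printf("got %zd bytes on ifindex %d, pkttype %u, hatype %u\n",
	       n, from.sll_ifindex, from.sll_pkttype, from.sll_hatype);
	return 0;
}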
1863 | ||
40d4e3df ED |
1864 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
1865 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1866 | { |
1867 | struct sock *sk; | |
1868 | struct packet_sock *po; | |
1869 | struct sockaddr_ll *sll; | |
184f489e | 1870 | union tpacket_uhdr h; |
40d4e3df | 1871 | u8 *skb_head = skb->data; |
1da177e4 | 1872 | int skb_len = skb->len; |
dbcb5855 | 1873 | unsigned int snaplen, res; |
f6fb8f10 | 1874 | unsigned long status = TP_STATUS_USER; |
bbd6ef87 | 1875 | unsigned short macoff, netoff, hdrlen; |
1da177e4 | 1876 | struct sk_buff *copy_skb = NULL; |
bbd6ef87 | 1877 | struct timespec ts; |
b9c32fb2 | 1878 | __u32 ts_status; |
1da177e4 | 1879 | |
51846355 AW |
1880 | /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. |
1881 | * We may add members to them until current aligned size without forcing | |
1882 | * userspace to call getsockopt(..., PACKET_HDRLEN, ...). | |
1883 | */ | |
1884 | BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); | |
1885 | BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); | |
1886 | ||
1da177e4 LT |
1887 | if (skb->pkt_type == PACKET_LOOPBACK) |
1888 | goto drop; | |
1889 | ||
1890 | sk = pt->af_packet_priv; | |
1891 | po = pkt_sk(sk); | |
1892 | ||
09ad9bc7 | 1893 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1894 | goto drop; |
1895 | ||
3b04ddde | 1896 | if (dev->header_ops) { |
1da177e4 | 1897 | if (sk->sk_type != SOCK_DGRAM) |
98e399f8 | 1898 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1899 | else if (skb->pkt_type == PACKET_OUTGOING) { |
1900 | /* Special case: outgoing packets have ll header at head */ | |
bbe735e4 | 1901 | skb_pull(skb, skb_network_offset(skb)); |
1da177e4 LT |
1902 | } |
1903 | } | |
1904 | ||
8dc41944 HX |
1905 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
1906 | status |= TP_STATUS_CSUMNOTREADY; | |
1907 | ||
1da177e4 LT |
1908 | snaplen = skb->len; |
1909 | ||
dbcb5855 DM |
1910 | res = run_filter(skb, sk, snaplen); |
1911 | if (!res) | |
fda9ef5d | 1912 | goto drop_n_restore; |
dbcb5855 DM |
1913 | if (snaplen > res) |
1914 | snaplen = res; | |
1da177e4 LT |
1915 | |
1916 | if (sk->sk_type == SOCK_DGRAM) { | |
8913336a PM |
1917 | macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + |
1918 | po->tp_reserve; | |
1da177e4 | 1919 | } else { |
95c96174 | 1920 | unsigned int maclen = skb_network_offset(skb); |
bbd6ef87 | 1921 | netoff = TPACKET_ALIGN(po->tp_hdrlen + |
8913336a PM |
1922 | (maclen < 16 ? 16 : maclen)) + |
1923 | po->tp_reserve; | |
1da177e4 LT |
1924 | macoff = netoff - maclen; |
1925 | } | |
f6fb8f10 | 1926 | if (po->tp_version <= TPACKET_V2) { |
1927 | if (macoff + snaplen > po->rx_ring.frame_size) { | |
1928 | if (po->copy_thresh && | |
0fd7bac6 | 1929 | atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { |
f6fb8f10 | 1930 | if (skb_shared(skb)) { |
1931 | copy_skb = skb_clone(skb, GFP_ATOMIC); | |
1932 | } else { | |
1933 | copy_skb = skb_get(skb); | |
1934 | skb_head = skb->data; | |
1935 | } | |
1936 | if (copy_skb) | |
1937 | skb_set_owner_r(copy_skb, sk); | |
1da177e4 | 1938 | } |
f6fb8f10 | 1939 | snaplen = po->rx_ring.frame_size - macoff; |
1940 | if ((int)snaplen < 0) | |
1941 | snaplen = 0; | |
1da177e4 | 1942 | } |
dc808110 ED |
1943 | } else if (unlikely(macoff + snaplen > |
1944 | GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { | |
1945 | u32 nval; | |
1946 | ||
1947 | nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; | |
1948 | pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", | |
1949 | snaplen, nval, macoff); | |
1950 | snaplen = nval; | |
1951 | if (unlikely((int)snaplen < 0)) { | |
1952 | snaplen = 0; | |
1953 | macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; | |
1954 | } | |
1da177e4 | 1955 | } |
1da177e4 | 1956 | spin_lock(&sk->sk_receive_queue.lock); |
f6fb8f10 | 1957 | h.raw = packet_current_rx_frame(po, skb, |
1958 | TP_STATUS_KERNEL, (macoff+snaplen)); | |
bbd6ef87 | 1959 | if (!h.raw) |
1da177e4 | 1960 | goto ring_is_full; |
f6fb8f10 | 1961 | if (po->tp_version <= TPACKET_V2) { |
1962 | packet_increment_rx_head(po, &po->rx_ring); | |
1963 | /* | |
1964 | * LOSING will be reported till you read the stats, | |
1965 | * because it's COR - Clear On Read. | |
1966 | * Anyway, moving it for V1/V2 only as V3 doesn't need this | |
1967 | * at packet level. | |
1968 | */ | |
ee80fbf3 | 1969 | if (po->stats.stats1.tp_drops) |
f6fb8f10 | 1970 | status |= TP_STATUS_LOSING; |
1971 | } | |
ee80fbf3 | 1972 | po->stats.stats1.tp_packets++; |
1da177e4 LT |
1973 | if (copy_skb) { |
1974 | status |= TP_STATUS_COPY; | |
1975 | __skb_queue_tail(&sk->sk_receive_queue, copy_skb); | |
1976 | } | |
1da177e4 LT |
1977 | spin_unlock(&sk->sk_receive_queue.lock); |
1978 | ||
bbd6ef87 | 1979 | skb_copy_bits(skb, 0, h.raw + macoff, snaplen); |
b9c32fb2 DB |
1980 | |
1981 | if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) | |
7a51384c | 1982 | getnstimeofday(&ts); |
1da177e4 | 1983 | |
b9c32fb2 DB |
1984 | status |= ts_status; |
1985 | ||
bbd6ef87 PM |
1986 | switch (po->tp_version) { |
1987 | case TPACKET_V1: | |
1988 | h.h1->tp_len = skb->len; | |
1989 | h.h1->tp_snaplen = snaplen; | |
1990 | h.h1->tp_mac = macoff; | |
1991 | h.h1->tp_net = netoff; | |
4b457bdf DB |
1992 | h.h1->tp_sec = ts.tv_sec; |
1993 | h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; | |
bbd6ef87 PM |
1994 | hdrlen = sizeof(*h.h1); |
1995 | break; | |
1996 | case TPACKET_V2: | |
1997 | h.h2->tp_len = skb->len; | |
1998 | h.h2->tp_snaplen = snaplen; | |
1999 | h.h2->tp_mac = macoff; | |
2000 | h.h2->tp_net = netoff; | |
bbd6ef87 PM |
2001 | h.h2->tp_sec = ts.tv_sec; |
2002 | h.h2->tp_nsec = ts.tv_nsec; | |
a3bcc23e BG |
2003 | if (vlan_tx_tag_present(skb)) { |
2004 | h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); | |
a0cdfcf3 AW |
2005 | h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); |
2006 | status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | |
a3bcc23e BG |
2007 | } else { |
2008 | h.h2->tp_vlan_tci = 0; | |
a0cdfcf3 | 2009 | h.h2->tp_vlan_tpid = 0; |
a3bcc23e | 2010 | } |
e4d26f4b | 2011 | memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); |
bbd6ef87 PM |
2012 | hdrlen = sizeof(*h.h2); |
2013 | break; | |
f6fb8f10 | 2014 | case TPACKET_V3: |
2015 | /* tp_nxt_offset,vlan are already populated above. | |
2016 | * So DONT clear those fields here | |
2017 | */ | |
2018 | h.h3->tp_status |= status; | |
2019 | h.h3->tp_len = skb->len; | |
2020 | h.h3->tp_snaplen = snaplen; | |
2021 | h.h3->tp_mac = macoff; | |
2022 | h.h3->tp_net = netoff; | |
f6fb8f10 | 2023 | h.h3->tp_sec = ts.tv_sec; |
2024 | h.h3->tp_nsec = ts.tv_nsec; | |
e4d26f4b | 2025 | memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); |
f6fb8f10 | 2026 | hdrlen = sizeof(*h.h3); |
2027 | break; | |
bbd6ef87 PM |
2028 | default: |
2029 | BUG(); | |
2030 | } | |
1da177e4 | 2031 | |
bbd6ef87 | 2032 | sll = h.raw + TPACKET_ALIGN(hdrlen); |
b95cce35 | 2033 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
1da177e4 LT |
2034 | sll->sll_family = AF_PACKET; |
2035 | sll->sll_hatype = dev->type; | |
2036 | sll->sll_protocol = skb->protocol; | |
2037 | sll->sll_pkttype = skb->pkt_type; | |
8032b464 | 2038 | if (unlikely(po->origdev)) |
80feaacb PWJ |
2039 | sll->sll_ifindex = orig_dev->ifindex; |
2040 | else | |
2041 | sll->sll_ifindex = dev->ifindex; | |
1da177e4 | 2042 | |
e16aa207 | 2043 | smp_mb(); |
f0d4eb29 | 2044 | |
f6dafa95 | 2045 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 |
f0d4eb29 | 2046 | if (po->tp_version <= TPACKET_V2) { |
0af55bb5 CG |
2047 | u8 *start, *end; |
2048 | ||
f0d4eb29 DB |
2049 | end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + |
2050 | macoff + snaplen); | |
2051 | ||
2052 | for (start = h.raw; start < end; start += PAGE_SIZE) | |
2053 | flush_dcache_page(pgv_to_page(start)); | |
1da177e4 | 2054 | } |
f0d4eb29 | 2055 | smp_wmb(); |
f6dafa95 | 2056 | #endif |
f0d4eb29 | 2057 | |
da413eec | 2058 | if (po->tp_version <= TPACKET_V2) { |
f6fb8f10 | 2059 | __packet_set_status(po, h.raw, status); |
da413eec DC |
2060 | sk->sk_data_ready(sk); |
2061 | } else { | |
f6fb8f10 | 2062 | prb_clear_blk_fill_status(&po->rx_ring); |
da413eec | 2063 | } |
1da177e4 LT |
2064 | |
2065 | drop_n_restore: | |
2066 | if (skb_head != skb->data && skb_shared(skb)) { | |
2067 | skb->data = skb_head; | |
2068 | skb->len = skb_len; | |
2069 | } | |
2070 | drop: | |
1ce4f28b | 2071 | kfree_skb(skb); |
1da177e4 LT |
2072 | return 0; |
2073 | ||
2074 | ring_is_full: | |
ee80fbf3 | 2075 | po->stats.stats1.tp_drops++; |
1da177e4 LT |
2076 | spin_unlock(&sk->sk_receive_queue.lock); |
2077 | ||
676d2369 | 2078 | sk->sk_data_ready(sk); |
acb5d75b | 2079 | kfree_skb(copy_skb); |
1da177e4 LT |
2080 | goto drop_n_restore; |
2081 | } | |
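tpacket_rcv() is the kernel half of PACKET_RX_RING: the mapped frame it fills becomes visible to userspace once tp_status gains TP_STATUS_USER, and handing the slot back means writing TP_STATUS_KERNEL. A sketch of the matching consumer loop for TPACKET_V2; the ring geometry is an arbitrary placeholder and cleanup is omitted:

#include <stdio.h>
#include <string.h>
#include <poll.h>
#include <arpa/inet.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int version = TPACKET_V2;
	struct tpacket_req req = {
		.tp_block_size = 4096,
		.tp_block_nr   = 4,
		.tp_frame_size = 2048,
		.tp_frame_nr   = 8,	/* (block_size / frame_size) * block_nr */
	};
	unsigned char *ring;
	unsigned int i;

	if (fd < 0 ||
	    setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)) < 0 ||
	    setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
		perror("setup");
		return 1;
	}

	ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	for (i = 0; ; i = (i + 1) % req.tp_frame_nr) {	/* runs forever; a sketch */
		struct tpacket2_hdr *hdr =
			(struct tpacket2_hdr *)(ring + i * req.tp_frame_size);
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		while (!(hdr->tp_status & TP_STATUS_USER))
			poll(&pfd, 1, -1);	/* wait for tpacket_rcv() to fill the slot */

		printf("frame %u: len %u snaplen %u\n",
		       i, hdr->tp_len, hdr->tp_snaplen);

		hdr->tp_status = TP_STATUS_KERNEL;	/* hand the slot back */
	}
}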
2082 | ||
69e3c75f JB |
2083 | static void tpacket_destruct_skb(struct sk_buff *skb) |
2084 | { | |
2085 | struct packet_sock *po = pkt_sk(skb->sk); | |
1da177e4 | 2086 | |
69e3c75f | 2087 | if (likely(po->tx_ring.pg_vec)) { |
f0d4eb29 | 2088 | void *ph; |
b9c32fb2 DB |
2089 | __u32 ts; |
2090 | ||
69e3c75f | 2091 | ph = skb_shinfo(skb)->destructor_arg; |
b0138408 | 2092 | packet_dec_pending(&po->tx_ring); |
b9c32fb2 DB |
2093 | |
2094 | ts = __packet_set_timestamp(po, ph, skb); | |
2095 | __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); | |
69e3c75f JB |
2096 | } |
2097 | ||
2098 | sock_wfree(skb); | |
2099 | } | |
2100 | ||
9c707762 WB |
2101 | static bool ll_header_truncated(const struct net_device *dev, int len) |
2102 | { | |
2103 | /* net device doesn't like empty head */ | |
2104 | if (unlikely(len <= dev->hard_header_len)) { | |
2105 | net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n", | |
2106 | current->comm, len, dev->hard_header_len); | |
2107 | return true; | |
2108 | } | |
2109 | ||
2110 | return false; | |
2111 | } | |
2112 | ||
40d4e3df ED |
2113 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, |
2114 | void *frame, struct net_device *dev, int size_max, | |
ae641949 | 2115 | __be16 proto, unsigned char *addr, int hlen) |
69e3c75f | 2116 | { |
184f489e | 2117 | union tpacket_uhdr ph; |
09effa67 | 2118 | int to_write, offset, len, tp_len, nr_frags, len_max; |
69e3c75f JB |
2119 | struct socket *sock = po->sk.sk_socket; |
2120 | struct page *page; | |
2121 | void *data; | |
2122 | int err; | |
2123 | ||
2124 | ph.raw = frame; | |
2125 | ||
2126 | skb->protocol = proto; | |
2127 | skb->dev = dev; | |
2128 | skb->priority = po->sk.sk_priority; | |
2d37a186 | 2129 | skb->mark = po->sk.sk_mark; |
2e31396f | 2130 | sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags); |
69e3c75f JB |
2131 | skb_shinfo(skb)->destructor_arg = ph.raw; |
2132 | ||
2133 | switch (po->tp_version) { | |
2134 | case TPACKET_V2: | |
2135 | tp_len = ph.h2->tp_len; | |
2136 | break; | |
2137 | default: | |
2138 | tp_len = ph.h1->tp_len; | |
2139 | break; | |
2140 | } | |
09effa67 DM |
2141 | if (unlikely(tp_len > size_max)) { |
2142 | pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); | |
2143 | return -EMSGSIZE; | |
2144 | } | |
69e3c75f | 2145 | |
ae641949 | 2146 | skb_reserve(skb, hlen); |
69e3c75f | 2147 | skb_reset_network_header(skb); |
c1aad275 | 2148 | |
d346a3fa DB |
2149 | if (!packet_use_direct_xmit(po)) |
2150 | skb_probe_transport_header(skb, 0); | |
2151 | if (unlikely(po->tp_tx_has_off)) { | |
5920cd3a PC |
2152 | int off_min, off_max, off; |
2153 | off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); | |
2154 | off_max = po->tx_ring.frame_size - tp_len; | |
2155 | if (sock->type == SOCK_DGRAM) { | |
2156 | switch (po->tp_version) { | |
2157 | case TPACKET_V2: | |
2158 | off = ph.h2->tp_net; | |
2159 | break; | |
2160 | default: | |
2161 | off = ph.h1->tp_net; | |
2162 | break; | |
2163 | } | |
2164 | } else { | |
2165 | switch (po->tp_version) { | |
2166 | case TPACKET_V2: | |
2167 | off = ph.h2->tp_mac; | |
2168 | break; | |
2169 | default: | |
2170 | off = ph.h1->tp_mac; | |
2171 | break; | |
2172 | } | |
2173 | } | |
2174 | if (unlikely((off < off_min) || (off_max < off))) | |
2175 | return -EINVAL; | |
2176 | data = ph.raw + off; | |
2177 | } else { | |
2178 | data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); | |
2179 | } | |
69e3c75f JB |
2180 | to_write = tp_len; |
2181 | ||
2182 | if (sock->type == SOCK_DGRAM) { | |
2183 | err = dev_hard_header(skb, dev, ntohs(proto), addr, | |
2184 | NULL, tp_len); | |
2185 | if (unlikely(err < 0)) | |
2186 | return -EINVAL; | |
40d4e3df | 2187 | } else if (dev->hard_header_len) { |
9c707762 | 2188 | if (ll_header_truncated(dev, tp_len)) |
69e3c75f | 2189 | return -EINVAL; |
69e3c75f JB |
2190 | |
2191 | skb_push(skb, dev->hard_header_len); | |
2192 | err = skb_store_bits(skb, 0, data, | |
2193 | dev->hard_header_len); | |
2194 | if (unlikely(err)) | |
2195 | return err; | |
2196 | ||
2197 | data += dev->hard_header_len; | |
2198 | to_write -= dev->hard_header_len; | |
2199 | } | |
2200 | ||
69e3c75f JB |
2201 | offset = offset_in_page(data); |
2202 | len_max = PAGE_SIZE - offset; | |
2203 | len = ((to_write > len_max) ? len_max : to_write); | |
2204 | ||
2205 | skb->data_len = to_write; | |
2206 | skb->len += to_write; | |
2207 | skb->truesize += to_write; | |
2208 | atomic_add(to_write, &po->sk.sk_wmem_alloc); | |
2209 | ||
2210 | while (likely(to_write)) { | |
2211 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2212 | ||
2213 | if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { | |
40d4e3df ED |
2214 | pr_err("Packet exceeds the number of skb frags (%lu)\n", | |
2215 | MAX_SKB_FRAGS); | |
69e3c75f JB |
2216 | return -EFAULT; |
2217 | } | |
2218 | ||
0af55bb5 CG |
2219 | page = pgv_to_page(data); |
2220 | data += len; | |
69e3c75f JB |
2221 | flush_dcache_page(page); |
2222 | get_page(page); | |
0af55bb5 | 2223 | skb_fill_page_desc(skb, nr_frags, page, offset, len); |
69e3c75f JB |
2224 | to_write -= len; |
2225 | offset = 0; | |
2226 | len_max = PAGE_SIZE; | |
2227 | len = ((to_write > len_max) ? len_max : to_write); | |
2228 | } | |
2229 | ||
2230 | return tp_len; | |
2231 | } | |
2232 | ||
2233 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |
2234 | { | |
69e3c75f JB |
2235 | struct sk_buff *skb; |
2236 | struct net_device *dev; | |
2237 | __be16 proto; | |
09effa67 | 2238 | int err, reserve = 0; |
40d4e3df | 2239 | void *ph; |
342dfc30 | 2240 | DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); |
87a2fd28 | 2241 | bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); |
69e3c75f JB |
2242 | int tp_len, size_max; |
2243 | unsigned char *addr; | |
2244 | int len_sum = 0; | |
9e67030a | 2245 | int status = TP_STATUS_AVAILABLE; |
ae641949 | 2246 | int hlen, tlen; |
69e3c75f | 2247 | |
69e3c75f JB |
2248 | mutex_lock(&po->pg_vec_lock); |
2249 | ||
66e56cd4 | 2250 | if (likely(saddr == NULL)) { |
e40526cb | 2251 | dev = packet_cached_dev_get(po); |
69e3c75f JB |
2252 | proto = po->num; |
2253 | addr = NULL; | |
2254 | } else { | |
2255 | err = -EINVAL; | |
2256 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | |
2257 | goto out; | |
2258 | if (msg->msg_namelen < (saddr->sll_halen | |
2259 | + offsetof(struct sockaddr_ll, | |
2260 | sll_addr))) | |
2261 | goto out; | |
69e3c75f JB |
2262 | proto = saddr->sll_protocol; |
2263 | addr = saddr->sll_addr; | |
827d9780 | 2264 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
69e3c75f JB |
2265 | } |
2266 | ||
69e3c75f JB |
2267 | err = -ENXIO; |
2268 | if (unlikely(dev == NULL)) | |
2269 | goto out; | |
69e3c75f JB |
2270 | err = -ENETDOWN; |
2271 | if (unlikely(!(dev->flags & IFF_UP))) | |
2272 | goto out_put; | |
2273 | ||
52f1454f | 2274 | reserve = dev->hard_header_len + VLAN_HLEN; |
69e3c75f | 2275 | size_max = po->tx_ring.frame_size |
b5dd884e | 2276 | - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); |
69e3c75f | 2277 | |
09effa67 DM |
2278 | if (size_max > dev->mtu + reserve) |
2279 | size_max = dev->mtu + reserve; | |
2280 | ||
69e3c75f JB |
2281 | do { |
2282 | ph = packet_current_frame(po, &po->tx_ring, | |
87a2fd28 | 2283 | TP_STATUS_SEND_REQUEST); |
69e3c75f | 2284 | if (unlikely(ph == NULL)) { |
87a2fd28 DB |
2285 | if (need_wait && need_resched()) |
2286 | schedule(); | |
69e3c75f JB |
2287 | continue; |
2288 | } | |
2289 | ||
2290 | status = TP_STATUS_SEND_REQUEST; | |
ae641949 HX |
2291 | hlen = LL_RESERVED_SPACE(dev); |
2292 | tlen = dev->needed_tailroom; | |
69e3c75f | 2293 | skb = sock_alloc_send_skb(&po->sk, |
ae641949 | 2294 | hlen + tlen + sizeof(struct sockaddr_ll), |
69e3c75f JB |
2295 | 0, &err); |
2296 | ||
2297 | if (unlikely(skb == NULL)) | |
2298 | goto out_status; | |
2299 | ||
2300 | tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, | |
52f1454f DB |
2301 | addr, hlen); |
2302 | if (tp_len > dev->mtu + dev->hard_header_len) { | |
2303 | struct ethhdr *ehdr; | |
2304 | /* Earlier code assumed this would be a VLAN pkt, | |
2305 | * double-check this now that we have the actual | |
2306 | * packet in hand. | |
2307 | */ | |
69e3c75f | 2308 | |
52f1454f DB |
2309 | skb_reset_mac_header(skb); |
2310 | ehdr = eth_hdr(skb); | |
2311 | if (ehdr->h_proto != htons(ETH_P_8021Q)) | |
2312 | tp_len = -EMSGSIZE; | |
2313 | } | |
69e3c75f JB |
2314 | if (unlikely(tp_len < 0)) { |
2315 | if (po->tp_loss) { | |
2316 | __packet_set_status(po, ph, | |
2317 | TP_STATUS_AVAILABLE); | |
2318 | packet_increment_head(&po->tx_ring); | |
2319 | kfree_skb(skb); | |
2320 | continue; | |
2321 | } else { | |
2322 | status = TP_STATUS_WRONG_FORMAT; | |
2323 | err = tp_len; | |
2324 | goto out_status; | |
2325 | } | |
2326 | } | |
2327 | ||
0fd5d57b DB |
2328 | packet_pick_tx_queue(dev, skb); |
2329 | ||
69e3c75f JB |
2330 | skb->destructor = tpacket_destruct_skb; |
2331 | __packet_set_status(po, ph, TP_STATUS_SENDING); | |
b0138408 | 2332 | packet_inc_pending(&po->tx_ring); |
69e3c75f JB |
2333 | |
2334 | status = TP_STATUS_SEND_REQUEST; | |
d346a3fa | 2335 | err = po->xmit(skb); |
eb70df13 JP |
2336 | if (unlikely(err > 0)) { |
2337 | err = net_xmit_errno(err); | |
2338 | if (err && __packet_get_status(po, ph) == | |
2339 | TP_STATUS_AVAILABLE) { | |
2340 | /* skb was destructed already */ | |
2341 | skb = NULL; | |
2342 | goto out_status; | |
2343 | } | |
2344 | /* | |
2345 | * skb was dropped but not destructed yet; | |
2346 | * let's treat it like congestion or err < 0 | |
2347 | */ | |
2348 | err = 0; | |
2349 | } | |
69e3c75f JB |
2350 | packet_increment_head(&po->tx_ring); |
2351 | len_sum += tp_len; | |
b0138408 DB |
2352 | } while (likely((ph != NULL) || |
2353 | /* Note: packet_read_pending() might be slow if we have | |
2354 | * to call it as it's per_cpu variable, but in fast-path | |
2355 | * we already short-circuit the loop with the first | |
2356 | * condition, and luckily don't have to go that path | |
2357 | * anyway. | |
2358 | */ | |
2359 | (need_wait && packet_read_pending(&po->tx_ring)))); | |
69e3c75f JB |
2360 | |
2361 | err = len_sum; | |
2362 | goto out_put; | |
2363 | ||
69e3c75f JB |
2364 | out_status: |
2365 | __packet_set_status(po, ph, status); | |
2366 | kfree_skb(skb); | |
2367 | out_put: | |
e40526cb | 2368 | dev_put(dev); |
69e3c75f JB |
2369 | out: |
2370 | mutex_unlock(&po->pg_vec_lock); | |
2371 | return err; | |
2372 | } | |
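tpacket_snd() drains frames that userspace has marked TP_STATUS_SEND_REQUEST in a PACKET_TX_RING. A hedged sketch of the producer side for TPACKET_V2; the interface name, ring geometry and the all-zero test frame are placeholders:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int version = TPACKET_V2;
	struct tpacket_req req = {
		.tp_block_size = 4096, .tp_block_nr = 1,
		.tp_frame_size = 2048, .tp_frame_nr = 2,
	};
	struct sockaddr_ll ll = {
		.sll_family   = AF_PACKET,
		.sll_protocol = htons(ETH_P_ALL),
		.sll_ifindex  = if_nametoindex("eth0"),	/* placeholder device */
	};
	unsigned char *ring, *data;
	struct tpacket2_hdr *hdr;

	if (fd < 0 ||
	    setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)) < 0 ||
	    setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req)) < 0 ||
	    bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
		perror("setup");
		return 1;
	}

	ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Fill the first frame: header, then payload at the documented offset. */
	hdr  = (struct tpacket2_hdr *)ring;
	data = ring + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
	memset(data, 0, ETH_ZLEN);		/* dummy 60-byte frame */
	hdr->tp_len    = ETH_ZLEN;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	/* Kick tpacket_snd(); a NULL buffer with length 0 means "walk the ring". */
	if (send(fd, NULL, 0, 0) < 0)
		perror("send");

	close(fd);
	return 0;
}

When the skb is freed, tpacket_destruct_skb() above flips the slot back to TP_STATUS_AVAILABLE so the producer can reuse it.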
69e3c75f | 2373 | |
eea49cc9 OJ |
2374 | static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, |
2375 | size_t reserve, size_t len, | |
2376 | size_t linear, int noblock, | |
2377 | int *err) | |
bfd5f4a3 SS |
2378 | { |
2379 | struct sk_buff *skb; | |
2380 | ||
2381 | /* Under a page? Don't bother with paged skb. */ | |
2382 | if (prepad + len < PAGE_SIZE || !linear) | |
2383 | linear = len; | |
2384 | ||
2385 | skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, | |
28d64271 | 2386 | err, 0); |
bfd5f4a3 SS |
2387 | if (!skb) |
2388 | return NULL; | |
2389 | ||
2390 | skb_reserve(skb, reserve); | |
2391 | skb_put(skb, linear); | |
2392 | skb->data_len = len - linear; | |
2393 | skb->len += len - linear; | |
2394 | ||
2395 | return skb; | |
2396 | } | |
2397 | ||
d346a3fa | 2398 | static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
1da177e4 LT |
2399 | { |
2400 | struct sock *sk = sock->sk; | |
342dfc30 | 2401 | DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); |
1da177e4 LT |
2402 | struct sk_buff *skb; |
2403 | struct net_device *dev; | |
0e11c91e | 2404 | __be16 proto; |
1da177e4 | 2405 | unsigned char *addr; |
827d9780 | 2406 | int err, reserve = 0; |
bfd5f4a3 SS |
2407 | struct virtio_net_hdr vnet_hdr = { 0 }; |
2408 | int offset = 0; | |
2409 | int vnet_hdr_len; | |
2410 | struct packet_sock *po = pkt_sk(sk); | |
2411 | unsigned short gso_type = 0; | |
ae641949 | 2412 | int hlen, tlen; |
3bdc0eba | 2413 | int extra_len = 0; |
8feb2fb2 | 2414 | ssize_t n; |
1da177e4 LT |
2415 | |
2416 | /* | |
1ce4f28b | 2417 | * Get and verify the address. |
1da177e4 | 2418 | */ |
1ce4f28b | 2419 | |
66e56cd4 | 2420 | if (likely(saddr == NULL)) { |
e40526cb | 2421 | dev = packet_cached_dev_get(po); |
1da177e4 LT |
2422 | proto = po->num; |
2423 | addr = NULL; | |
2424 | } else { | |
2425 | err = -EINVAL; | |
2426 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | |
2427 | goto out; | |
0fb375fb EB |
2428 | if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) |
2429 | goto out; | |
1da177e4 LT |
2430 | proto = saddr->sll_protocol; |
2431 | addr = saddr->sll_addr; | |
827d9780 | 2432 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); |
1da177e4 LT |
2433 | } |
2434 | ||
1da177e4 | 2435 | err = -ENXIO; |
e40526cb | 2436 | if (unlikely(dev == NULL)) |
1da177e4 | 2437 | goto out_unlock; |
d5e76b0a | 2438 | err = -ENETDOWN; |
e40526cb | 2439 | if (unlikely(!(dev->flags & IFF_UP))) |
d5e76b0a DM |
2440 | goto out_unlock; |
2441 | ||
e40526cb DB |
2442 | if (sock->type == SOCK_RAW) |
2443 | reserve = dev->hard_header_len; | |
bfd5f4a3 SS |
2444 | if (po->has_vnet_hdr) { |
2445 | vnet_hdr_len = sizeof(vnet_hdr); | |
2446 | ||
2447 | err = -EINVAL; | |
2448 | if (len < vnet_hdr_len) | |
2449 | goto out_unlock; | |
2450 | ||
2451 | len -= vnet_hdr_len; | |
2452 | ||
8feb2fb2 | 2453 | err = -EFAULT; |
c0371da6 | 2454 | n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter); |
8feb2fb2 | 2455 | if (n != vnet_hdr_len) |
bfd5f4a3 SS |
2456 | goto out_unlock; |
2457 | ||
2458 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | |
dc9e5153 MT |
2459 | (__virtio16_to_cpu(false, vnet_hdr.csum_start) + |
2460 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > | |
2461 | __virtio16_to_cpu(false, vnet_hdr.hdr_len))) | |
2462 | vnet_hdr.hdr_len = __cpu_to_virtio16(false, | |
2463 | __virtio16_to_cpu(false, vnet_hdr.csum_start) + | |
2464 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); | |
bfd5f4a3 SS |
2465 | |
2466 | err = -EINVAL; | |
dc9e5153 | 2467 | if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) |
bfd5f4a3 SS |
2468 | goto out_unlock; |
2469 | ||
2470 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | |
2471 | switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { | |
2472 | case VIRTIO_NET_HDR_GSO_TCPV4: | |
2473 | gso_type = SKB_GSO_TCPV4; | |
2474 | break; | |
2475 | case VIRTIO_NET_HDR_GSO_TCPV6: | |
2476 | gso_type = SKB_GSO_TCPV6; | |
2477 | break; | |
2478 | case VIRTIO_NET_HDR_GSO_UDP: | |
2479 | gso_type = SKB_GSO_UDP; | |
2480 | break; | |
2481 | default: | |
2482 | goto out_unlock; | |
2483 | } | |
2484 | ||
2485 | if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) | |
2486 | gso_type |= SKB_GSO_TCP_ECN; | |
2487 | ||
2488 | if (vnet_hdr.gso_size == 0) | |
2489 | goto out_unlock; | |
2490 | ||
2491 | } | |
2492 | } | |
2493 | ||
3bdc0eba BG |
2494 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
2495 | if (!netif_supports_nofcs(dev)) { | |
2496 | err = -EPROTONOSUPPORT; | |
2497 | goto out_unlock; | |
2498 | } | |
2499 | extra_len = 4; /* We're doing our own CRC */ | |
2500 | } | |
2501 | ||
1da177e4 | 2502 | err = -EMSGSIZE; |
3bdc0eba | 2503 | if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) |
1da177e4 LT |
2504 | goto out_unlock; |
2505 | ||
bfd5f4a3 | 2506 | err = -ENOBUFS; |
ae641949 HX |
2507 | hlen = LL_RESERVED_SPACE(dev); |
2508 | tlen = dev->needed_tailroom; | |
dc9e5153 MT |
2509 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, |
2510 | __virtio16_to_cpu(false, vnet_hdr.hdr_len), | |
bfd5f4a3 | 2511 | msg->msg_flags & MSG_DONTWAIT, &err); |
40d4e3df | 2512 | if (skb == NULL) |
1da177e4 LT |
2513 | goto out_unlock; |
2514 | ||
bfd5f4a3 | 2515 | skb_set_network_header(skb, reserve); |
1da177e4 | 2516 | |
0c4e8581 | 2517 | err = -EINVAL; |
9c707762 WB |
2518 | if (sock->type == SOCK_DGRAM) { |
2519 | offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); | |
2520 | if (unlikely(offset < 0)) | |
2521 | goto out_free; | |
2522 | } else { | |
2523 | if (ll_header_truncated(dev, len)) | |
2524 | goto out_free; | |
2525 | } | |
1da177e4 LT |
2526 | |
2527 | /* Returns -EFAULT on error */ | |
c0371da6 | 2528 | err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); |
1da177e4 LT |
2529 | if (err) |
2530 | goto out_free; | |
bf84a010 DB |
2531 | |
2532 | sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); | |
1da177e4 | 2533 | |
3bdc0eba | 2534 | if (!gso_type && (len > dev->mtu + reserve + extra_len)) { |
09effa67 DM |
2535 | /* Earlier code assumed this would be a VLAN pkt, |
2536 | * double-check this now that we have the actual | |
2537 | * packet in hand. | |
2538 | */ | |
2539 | struct ethhdr *ehdr; | |
2540 | skb_reset_mac_header(skb); | |
2541 | ehdr = eth_hdr(skb); | |
2542 | if (ehdr->h_proto != htons(ETH_P_8021Q)) { | |
2543 | err = -EMSGSIZE; | |
2544 | goto out_free; | |
2545 | } | |
57f89bfa BG |
2546 | } |
2547 | ||
09effa67 DM |
2548 | skb->protocol = proto; |
2549 | skb->dev = dev; | |
1da177e4 | 2550 | skb->priority = sk->sk_priority; |
2d37a186 | 2551 | skb->mark = sk->sk_mark; |
0fd5d57b DB |
2552 | |
2553 | packet_pick_tx_queue(dev, skb); | |
1da177e4 | 2554 | |
bfd5f4a3 SS |
2555 | if (po->has_vnet_hdr) { |
2556 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | |
dc9e5153 MT |
2557 | u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); |
2558 | u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); | |
2559 | if (!skb_partial_csum_set(skb, s, o)) { | |
bfd5f4a3 SS |
2560 | err = -EINVAL; |
2561 | goto out_free; | |
2562 | } | |
2563 | } | |
2564 | ||
dc9e5153 MT |
2565 | skb_shinfo(skb)->gso_size = |
2566 | __virtio16_to_cpu(false, vnet_hdr.gso_size); | |
bfd5f4a3 SS |
2567 | skb_shinfo(skb)->gso_type = gso_type; |
2568 | ||
2569 | /* Header must be checked, and gso_segs computed. */ | |
2570 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | |
2571 | skb_shinfo(skb)->gso_segs = 0; | |
2572 | ||
2573 | len += vnet_hdr_len; | |
2574 | } | |
2575 | ||
d346a3fa DB |
2576 | if (!packet_use_direct_xmit(po)) |
2577 | skb_probe_transport_header(skb, reserve); | |
3bdc0eba BG |
2578 | if (unlikely(extra_len == 4)) |
2579 | skb->no_fcs = 1; | |
2580 | ||
d346a3fa | 2581 | err = po->xmit(skb); |
1da177e4 LT |
2582 | if (err > 0 && (err = net_xmit_errno(err)) != 0) |
2583 | goto out_unlock; | |
2584 | ||
e40526cb | 2585 | dev_put(dev); |
1da177e4 | 2586 | |
40d4e3df | 2587 | return len; |
1da177e4 LT |
2588 | |
2589 | out_free: | |
2590 | kfree_skb(skb); | |
2591 | out_unlock: | |
e40526cb | 2592 | if (dev) |
1da177e4 LT |
2593 | dev_put(dev); |
2594 | out: | |
2595 | return err; | |
2596 | } | |
2597 | ||
69e3c75f JB |
2598 | static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, |
2599 | struct msghdr *msg, size_t len) | |
2600 | { | |
69e3c75f JB |
2601 | struct sock *sk = sock->sk; |
2602 | struct packet_sock *po = pkt_sk(sk); | |
d346a3fa | 2603 | |
69e3c75f JB |
2604 | if (po->tx_ring.pg_vec) |
2605 | return tpacket_snd(po, msg); | |
2606 | else | |
69e3c75f JB |
2607 | return packet_snd(sock, msg, len); |
2608 | } | |
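packet_snd() above also serves SOCK_DGRAM packet sockets, where the caller supplies only the payload plus a sockaddr_ll and dev_hard_header() builds the link-layer header from sll_addr. A sketch of that path; the interface name and destination MAC are placeholders:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
	unsigned char dst[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };	/* placeholder MAC */
	unsigned char payload[64] = { 0 };				/* dummy payload */
	struct sockaddr_ll ll;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ll, 0, sizeof(ll));
	ll.sll_family   = AF_PACKET;
	ll.sll_protocol = htons(ETH_P_IP);
	ll.sll_ifindex  = if_nametoindex("eth0");	/* placeholder device */
	ll.sll_halen    = ETH_ALEN;
	memcpy(ll.sll_addr, dst, ETH_ALEN);

	/* The kernel prepends the Ethernet header; we only supply the payload. */
	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&ll, sizeof(ll)) < 0)
		perror("sendto");
	return 0;
}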
2609 | ||
1da177e4 LT |
2610 | /* |
2611 | * Close a PACKET socket. This is fairly simple. We immediately go | |
2612 | * to 'closed' state and remove our protocol entry in the device list. | |
2613 | */ | |
2614 | ||
2615 | static int packet_release(struct socket *sock) | |
2616 | { | |
2617 | struct sock *sk = sock->sk; | |
2618 | struct packet_sock *po; | |
d12d01d6 | 2619 | struct net *net; |
f6fb8f10 | 2620 | union tpacket_req_u req_u; |
1da177e4 LT |
2621 | |
2622 | if (!sk) | |
2623 | return 0; | |
2624 | ||
3b1e0a65 | 2625 | net = sock_net(sk); |
1da177e4 LT |
2626 | po = pkt_sk(sk); |
2627 | ||
0fa7fa98 | 2628 | mutex_lock(&net->packet.sklist_lock); |
808f5114 | 2629 | sk_del_node_init_rcu(sk); |
0fa7fa98 PE |
2630 | mutex_unlock(&net->packet.sklist_lock); |
2631 | ||
2632 | preempt_disable(); | |
920de804 | 2633 | sock_prot_inuse_add(net, sk->sk_prot, -1); |
0fa7fa98 | 2634 | preempt_enable(); |
1da177e4 | 2635 | |
808f5114 | 2636 | spin_lock(&po->bind_lock); |
ce06b03e | 2637 | unregister_prot_hook(sk, false); |
66e56cd4 DB |
2638 | packet_cached_dev_reset(po); |
2639 | ||
160ff18a BG |
2640 | if (po->prot_hook.dev) { |
2641 | dev_put(po->prot_hook.dev); | |
2642 | po->prot_hook.dev = NULL; | |
2643 | } | |
808f5114 | 2644 | spin_unlock(&po->bind_lock); |
1da177e4 | 2645 | |
1da177e4 | 2646 | packet_flush_mclist(sk); |
1da177e4 | 2647 | |
9665d5d6 PS |
2648 | if (po->rx_ring.pg_vec) { |
2649 | memset(&req_u, 0, sizeof(req_u)); | |
f6fb8f10 | 2650 | packet_set_ring(sk, &req_u, 1, 0); |
9665d5d6 | 2651 | } |
69e3c75f | 2652 | |
9665d5d6 PS |
2653 | if (po->tx_ring.pg_vec) { |
2654 | memset(&req_u, 0, sizeof(req_u)); | |
f6fb8f10 | 2655 | packet_set_ring(sk, &req_u, 1, 1); |
9665d5d6 | 2656 | } |
1da177e4 | 2657 | |
dc99f600 DM |
2658 | fanout_release(sk); |
2659 | ||
808f5114 | 2660 | synchronize_net(); |
1da177e4 LT |
2661 | /* |
2662 | * Now the socket is dead. No more input will appear. | |
2663 | */ | |
1da177e4 LT |
2664 | sock_orphan(sk); |
2665 | sock->sk = NULL; | |
2666 | ||
2667 | /* Purge queues */ | |
2668 | ||
2669 | skb_queue_purge(&sk->sk_receive_queue); | |
b0138408 | 2670 | packet_free_pending(po); |
17ab56a2 | 2671 | sk_refcnt_debug_release(sk); |
1da177e4 LT |
2672 | |
2673 | sock_put(sk); | |
2674 | return 0; | |
2675 | } | |
2676 | ||
2677 | /* | |
2678 | * Attach a packet hook. | |
2679 | */ | |
2680 | ||
902fefb8 | 2681 | static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) |
1da177e4 LT |
2682 | { |
2683 | struct packet_sock *po = pkt_sk(sk); | |
902fefb8 DB |
2684 | const struct net_device *dev_curr; |
2685 | __be16 proto_curr; | |
2686 | bool need_rehook; | |
dc99f600 | 2687 | |
aef950b4 WY |
2688 | if (po->fanout) { |
2689 | if (dev) | |
2690 | dev_put(dev); | |
2691 | ||
dc99f600 | 2692 | return -EINVAL; |
aef950b4 | 2693 | } |
1da177e4 LT |
2694 | |
2695 | lock_sock(sk); | |
1da177e4 | 2696 | spin_lock(&po->bind_lock); |
66e56cd4 | 2697 | |
902fefb8 DB |
2698 | proto_curr = po->prot_hook.type; |
2699 | dev_curr = po->prot_hook.dev; | |
2700 | ||
2701 | need_rehook = proto_curr != proto || dev_curr != dev; | |
2702 | ||
2703 | if (need_rehook) { | |
2704 | unregister_prot_hook(sk, true); | |
1da177e4 | 2705 | |
902fefb8 DB |
2706 | po->num = proto; |
2707 | po->prot_hook.type = proto; | |
1da177e4 | 2708 | |
902fefb8 DB |
2709 | if (po->prot_hook.dev) |
2710 | dev_put(po->prot_hook.dev); | |
2711 | ||
2712 | po->prot_hook.dev = dev; | |
2713 | ||
2714 | po->ifindex = dev ? dev->ifindex : 0; | |
2715 | packet_cached_dev_assign(po, dev); | |
2716 | } | |
66e56cd4 | 2717 | |
902fefb8 | 2718 | if (proto == 0 || !need_rehook) |
1da177e4 LT |
2719 | goto out_unlock; |
2720 | ||
be85d4ad | 2721 | if (!dev || (dev->flags & IFF_UP)) { |
ce06b03e | 2722 | register_prot_hook(sk); |
be85d4ad UT |
2723 | } else { |
2724 | sk->sk_err = ENETDOWN; | |
2725 | if (!sock_flag(sk, SOCK_DEAD)) | |
2726 | sk->sk_error_report(sk); | |
1da177e4 LT |
2727 | } |
2728 | ||
2729 | out_unlock: | |
2730 | spin_unlock(&po->bind_lock); | |
2731 | release_sock(sk); | |
2732 | return 0; | |
2733 | } | |
2734 | ||
2735 | /* | |
2736 | * Bind a packet socket to a device | |
2737 | */ | |
2738 | ||
40d4e3df ED |
2739 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
2740 | int addr_len) | |
1da177e4 | 2741 | { |
40d4e3df | 2742 | struct sock *sk = sock->sk; |
1da177e4 LT |
2743 | char name[15]; |
2744 | struct net_device *dev; | |
2745 | int err = -ENODEV; | |
1ce4f28b | 2746 | |
1da177e4 LT |
2747 | /* |
2748 | * Check legality | |
2749 | */ | |
1ce4f28b | 2750 | |
8ae55f04 | 2751 | if (addr_len != sizeof(struct sockaddr)) |
1da177e4 | 2752 | return -EINVAL; |
40d4e3df | 2753 | strlcpy(name, uaddr->sa_data, sizeof(name)); |
1da177e4 | 2754 | |
3b1e0a65 | 2755 | dev = dev_get_by_name(sock_net(sk), name); |
160ff18a | 2756 | if (dev) |
1da177e4 | 2757 | err = packet_do_bind(sk, dev, pkt_sk(sk)->num); |
1da177e4 LT |
2758 | return err; |
2759 | } | |
1da177e4 LT |
2760 | |
2761 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |
2762 | { | |
40d4e3df ED |
2763 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; |
2764 | struct sock *sk = sock->sk; | |
1da177e4 LT |
2765 | struct net_device *dev = NULL; |
2766 | int err; | |
2767 | ||
2768 | ||
2769 | /* | |
2770 | * Check legality | |
2771 | */ | |
1ce4f28b | 2772 | |
1da177e4 LT |
2773 | if (addr_len < sizeof(struct sockaddr_ll)) |
2774 | return -EINVAL; | |
2775 | if (sll->sll_family != AF_PACKET) | |
2776 | return -EINVAL; | |
2777 | ||
2778 | if (sll->sll_ifindex) { | |
2779 | err = -ENODEV; | |
3b1e0a65 | 2780 | dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex); |
1da177e4 LT |
2781 | if (dev == NULL) |
2782 | goto out; | |
2783 | } | |
2784 | err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num); | |
1da177e4 LT |
2785 | |
2786 | out: | |
2787 | return err; | |
2788 | } | |
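packet_bind() expects a sockaddr_ll; a zero sll_protocol keeps whatever protocol the socket was created with (the `sll_protocol ? : pkt_sk(sk)->num` above), while a nonzero sll_ifindex restricts delivery to one device. A minimal sketch, interface name assumed:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct sockaddr_ll ll;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ll, 0, sizeof(ll));
	ll.sll_family   = AF_PACKET;
	ll.sll_protocol = htons(ETH_P_ALL);		/* 0 would keep the current protocol */
	ll.sll_ifindex  = if_nametoindex("eth0");	/* placeholder device */

	/* Restrict delivery to one interface; packet_do_bind() re-registers the hook. */
	if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
		perror("bind");
		return 1;
	}
	return 0;
}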
2789 | ||
2790 | static struct proto packet_proto = { | |
2791 | .name = "PACKET", | |
2792 | .owner = THIS_MODULE, | |
2793 | .obj_size = sizeof(struct packet_sock), | |
2794 | }; | |
2795 | ||
2796 | /* | |
1ce4f28b | 2797 | * Create a packet of type SOCK_PACKET. |
1da177e4 LT |
2798 | */ |
2799 | ||
3f378b68 EP |
2800 | static int packet_create(struct net *net, struct socket *sock, int protocol, |
2801 | int kern) | |
1da177e4 LT |
2802 | { |
2803 | struct sock *sk; | |
2804 | struct packet_sock *po; | |
0e11c91e | 2805 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ |
1da177e4 LT |
2806 | int err; |
2807 | ||
df008c91 | 2808 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) |
1da177e4 | 2809 | return -EPERM; |
be02097c DM |
2810 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && |
2811 | sock->type != SOCK_PACKET) | |
1da177e4 LT |
2812 | return -ESOCKTNOSUPPORT; |
2813 | ||
2814 | sock->state = SS_UNCONNECTED; | |
2815 | ||
2816 | err = -ENOBUFS; | |
6257ff21 | 2817 | sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto); |
1da177e4 LT |
2818 | if (sk == NULL) |
2819 | goto out; | |
2820 | ||
2821 | sock->ops = &packet_ops; | |
1da177e4 LT |
2822 | if (sock->type == SOCK_PACKET) |
2823 | sock->ops = &packet_ops_spkt; | |
be02097c | 2824 | |
1da177e4 LT |
2825 | sock_init_data(sock, sk); |
2826 | ||
2827 | po = pkt_sk(sk); | |
2828 | sk->sk_family = PF_PACKET; | |
0e11c91e | 2829 | po->num = proto; |
d346a3fa | 2830 | po->xmit = dev_queue_xmit; |
66e56cd4 | 2831 | |
b0138408 DB |
2832 | err = packet_alloc_pending(po); |
2833 | if (err) | |
2834 | goto out2; | |
2835 | ||
66e56cd4 | 2836 | packet_cached_dev_reset(po); |
1da177e4 LT |
2837 | |
2838 | sk->sk_destruct = packet_sock_destruct; | |
17ab56a2 | 2839 | sk_refcnt_debug_inc(sk); |
1da177e4 LT |
2840 | |
2841 | /* | |
2842 | * Attach a protocol block | |
2843 | */ | |
2844 | ||
2845 | spin_lock_init(&po->bind_lock); | |
905db440 | 2846 | mutex_init(&po->pg_vec_lock); |
1da177e4 | 2847 | po->prot_hook.func = packet_rcv; |
be02097c | 2848 | |
1da177e4 LT |
2849 | if (sock->type == SOCK_PACKET) |
2850 | po->prot_hook.func = packet_rcv_spkt; | |
be02097c | 2851 | |
1da177e4 LT |
2852 | po->prot_hook.af_packet_priv = sk; |
2853 | ||
0e11c91e AV |
2854 | if (proto) { |
2855 | po->prot_hook.type = proto; | |
ce06b03e | 2856 | register_prot_hook(sk); |
1da177e4 LT |
2857 | } |
2858 | ||
0fa7fa98 | 2859 | mutex_lock(&net->packet.sklist_lock); |
808f5114 | 2860 | sk_add_node_rcu(sk, &net->packet.sklist); |
0fa7fa98 PE |
2861 | mutex_unlock(&net->packet.sklist_lock); |
2862 | ||
2863 | preempt_disable(); | |
3680453c | 2864 | sock_prot_inuse_add(net, &packet_proto, 1); |
0fa7fa98 | 2865 | preempt_enable(); |
808f5114 | 2866 | |
40d4e3df | 2867 | return 0; |
b0138408 DB |
2868 | out2: |
2869 | sk_free(sk); | |
1da177e4 LT |
2870 | out: |
2871 | return err; | |
2872 | } | |
2873 | ||
2874 | /* | |
2875 | * Pull a packet from our receive queue and hand it to the user. | |
2876 | * If necessary we block. | |
2877 | */ | |
2878 | ||
2879 | static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |
2880 | struct msghdr *msg, size_t len, int flags) | |
2881 | { | |
2882 | struct sock *sk = sock->sk; | |
2883 | struct sk_buff *skb; | |
2884 | int copied, err; | |
bfd5f4a3 | 2885 | int vnet_hdr_len = 0; |
1da177e4 LT |
2886 | |
2887 | err = -EINVAL; | |
ed85b565 | 2888 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) |
1da177e4 LT |
2889 | goto out; |
2890 | ||
2891 | #if 0 | |
2892 | /* What error should we return now? EUNATTACH? */ | |
2893 | if (pkt_sk(sk)->ifindex < 0) | |
2894 | return -ENODEV; | |
2895 | #endif | |
2896 | ||
ed85b565 | 2897 | if (flags & MSG_ERRQUEUE) { |
cb820f8e RC |
2898 | err = sock_recv_errqueue(sk, msg, len, |
2899 | SOL_PACKET, PACKET_TX_TIMESTAMP); | |
ed85b565 RC |
2900 | goto out; |
2901 | } | |
2902 | ||
1da177e4 LT |
2903 | /* |
2904 | * Call the generic datagram receiver. This handles all sorts | |
2905 | * of horrible races and re-entrancy so we can forget about it | |
2906 | * in the protocol layers. | |
2907 | * | |
2908 | * Now it will return ENETDOWN if the device has just gone down, | |
2909 | * but then it will block. | |
2910 | */ | |
2911 | ||
40d4e3df | 2912 | skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); |
1da177e4 LT |
2913 | |
2914 | /* | |
1ce4f28b | 2915 | * An error occurred so return it. Because skb_recv_datagram() |
1da177e4 LT |
2916 | * handles the blocking, we don't have to see or worry about blocking | |
2917 | * retries. | |
2918 | */ | |
2919 | ||
8ae55f04 | 2920 | if (skb == NULL) |
1da177e4 LT |
2921 | goto out; |
2922 | ||
bfd5f4a3 SS |
2923 | if (pkt_sk(sk)->has_vnet_hdr) { |
2924 | struct virtio_net_hdr vnet_hdr = { 0 }; | |
2925 | ||
2926 | err = -EINVAL; | |
2927 | vnet_hdr_len = sizeof(vnet_hdr); | |
1f18b717 | 2928 | if (len < vnet_hdr_len) |
bfd5f4a3 SS |
2929 | goto out_free; |
2930 | ||
1f18b717 MK |
2931 | len -= vnet_hdr_len; |
2932 | ||
bfd5f4a3 SS |
2933 | if (skb_is_gso(skb)) { |
2934 | struct skb_shared_info *sinfo = skb_shinfo(skb); | |
2935 | ||
2936 | /* This is a hint as to how much should be linear. */ | |
dc9e5153 MT |
2937 | vnet_hdr.hdr_len = |
2938 | __cpu_to_virtio16(false, skb_headlen(skb)); | |
2939 | vnet_hdr.gso_size = | |
2940 | __cpu_to_virtio16(false, sinfo->gso_size); | |
bfd5f4a3 SS |
2941 | if (sinfo->gso_type & SKB_GSO_TCPV4) |
2942 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | |
2943 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | |
2944 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | |
2945 | else if (sinfo->gso_type & SKB_GSO_UDP) | |
2946 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; | |
2947 | else if (sinfo->gso_type & SKB_GSO_FCOE) | |
2948 | goto out_free; | |
2949 | else | |
2950 | BUG(); | |
2951 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) | |
2952 | vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; | |
2953 | } else | |
2954 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; | |
2955 | ||
2956 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
2957 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; | |
dc9e5153 MT |
2958 | vnet_hdr.csum_start = __cpu_to_virtio16(false, |
2959 | skb_checksum_start_offset(skb)); | |
2960 | vnet_hdr.csum_offset = __cpu_to_virtio16(false, | |
2961 | skb->csum_offset); | |
10a8d94a JW |
2962 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
2963 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; | |
bfd5f4a3 SS |
2964 | } /* else everything is zero */ |
2965 | ||
7eab8d9e | 2966 | err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len); |
bfd5f4a3 SS |
2967 | if (err < 0) |
2968 | goto out_free; | |
2969 | } | |
2970 | ||
f3d33426 HFS |
2971 | /* You lose any data beyond the buffer you gave. If it worries |
2972 | * a user program they can ask the device for its MTU | |
2973 | * anyway. | |
1da177e4 | 2974 | */ |
1da177e4 | 2975 | copied = skb->len; |
40d4e3df ED |
2976 | if (copied > len) { |
2977 | copied = len; | |
2978 | msg->msg_flags |= MSG_TRUNC; | |
1da177e4 LT |
2979 | } |
2980 | ||
51f3d02b | 2981 | err = skb_copy_datagram_msg(skb, 0, msg, copied); |
1da177e4 LT |
2982 | if (err) |
2983 | goto out_free; | |
2984 | ||
3b885787 | 2985 | sock_recv_ts_and_drops(msg, sk, skb); |
1da177e4 | 2986 | |
f3d33426 HFS |
2987 | if (msg->msg_name) { |
2988 | /* If the address length field is there to be filled | |
2989 | * in, we fill it in now. | |
2990 | */ | |
2991 | if (sock->type == SOCK_PACKET) { | |
342dfc30 | 2992 | __sockaddr_check_size(sizeof(struct sockaddr_pkt)); |
f3d33426 HFS |
2993 | msg->msg_namelen = sizeof(struct sockaddr_pkt); |
2994 | } else { | |
2995 | struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; | |
2996 | msg->msg_namelen = sll->sll_halen + | |
2997 | offsetof(struct sockaddr_ll, sll_addr); | |
2998 | } | |
ffbc6111 HX |
2999 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, |
3000 | msg->msg_namelen); | |
f3d33426 | 3001 | } |
1da177e4 | 3002 | |
8dc41944 | 3003 | if (pkt_sk(sk)->auxdata) { |
ffbc6111 HX |
3004 | struct tpacket_auxdata aux; |
3005 | ||
3006 | aux.tp_status = TP_STATUS_USER; | |
3007 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
3008 | aux.tp_status |= TP_STATUS_CSUMNOTREADY; | |
3009 | aux.tp_len = PACKET_SKB_CB(skb)->origlen; | |
3010 | aux.tp_snaplen = skb->len; | |
3011 | aux.tp_mac = 0; | |
bbe735e4 | 3012 | aux.tp_net = skb_network_offset(skb); |
a3bcc23e BG |
3013 | if (vlan_tx_tag_present(skb)) { |
3014 | aux.tp_vlan_tci = vlan_tx_tag_get(skb); | |
a0cdfcf3 AW |
3015 | aux.tp_vlan_tpid = ntohs(skb->vlan_proto); |
3016 | aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | |
a3bcc23e BG |
3017 | } else { |
3018 | aux.tp_vlan_tci = 0; | |
a0cdfcf3 | 3019 | aux.tp_vlan_tpid = 0; |
a3bcc23e | 3020 | } |
ffbc6111 | 3021 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); |
8dc41944 HX |
3022 | } |
3023 | ||
1da177e4 LT |
3024 | /* |
3025 | * Free or return the buffer as appropriate. Again this | |
3026 | * hides all the races and re-entrancy issues from us. | |
3027 | */ | |
bfd5f4a3 | 3028 | err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied); |
1da177e4 LT |
3029 | |
3030 | out_free: | |
3031 | skb_free_datagram(sk, skb); | |
3032 | out: | |
3033 | return err; | |
3034 | } | |
3035 | ||
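The auxdata branch of packet_recvmsg() above is driven by the PACKET_AUXDATA option: once enabled, every receive carries a struct tpacket_auxdata control message with the original length, snap length and VLAN tag. A sketch of reading it, with error handling kept minimal:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int one = 1;
	unsigned char buf[2048];
	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (fd < 0 ||
	    setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one)) < 0) {
		perror("setup");
		return 1;
	}

	if (recvmsg(fd, &msg, 0) < 0) {
		perror("recvmsg");
		return 1;
	}

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);

			printf("len %u snaplen %u vlan_tci %u\n",
			       aux->tp_len, aux->tp_snaplen, aux->tp_vlan_tci);
		}
	}
	return 0;
}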
1da177e4 LT |
3036 | static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, |
3037 | int *uaddr_len, int peer) | |
3038 | { | |
3039 | struct net_device *dev; | |
3040 | struct sock *sk = sock->sk; | |
3041 | ||
3042 | if (peer) | |
3043 | return -EOPNOTSUPP; | |
3044 | ||
3045 | uaddr->sa_family = AF_PACKET; | |
2dc85bf3 | 3046 | memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); |
654d1f8a ED |
3047 | rcu_read_lock(); |
3048 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); | |
3049 | if (dev) | |
2dc85bf3 | 3050 | strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); |
654d1f8a | 3051 | rcu_read_unlock(); |
1da177e4 LT |
3052 | *uaddr_len = sizeof(*uaddr); |
3053 | ||
3054 | return 0; | |
3055 | } | |
1da177e4 LT |
3056 | |
3057 | static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |
3058 | int *uaddr_len, int peer) | |
3059 | { | |
3060 | struct net_device *dev; | |
3061 | struct sock *sk = sock->sk; | |
3062 | struct packet_sock *po = pkt_sk(sk); | |
13cfa97b | 3063 | DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); |
1da177e4 LT |
3064 | |
3065 | if (peer) | |
3066 | return -EOPNOTSUPP; | |
3067 | ||
3068 | sll->sll_family = AF_PACKET; | |
3069 | sll->sll_ifindex = po->ifindex; | |
3070 | sll->sll_protocol = po->num; | |
67286640 | 3071 | sll->sll_pkttype = 0; |
654d1f8a ED |
3072 | rcu_read_lock(); |
3073 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); | |
1da177e4 LT |
3074 | if (dev) { |
3075 | sll->sll_hatype = dev->type; | |
3076 | sll->sll_halen = dev->addr_len; | |
3077 | memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); | |
1da177e4 LT |
3078 | } else { |
3079 | sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ | |
3080 | sll->sll_halen = 0; | |
3081 | } | |
654d1f8a | 3082 | rcu_read_unlock(); |
0fb375fb | 3083 | *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; |
1da177e4 LT |
3084 | |
3085 | return 0; | |
3086 | } | |
3087 | ||
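/*
 * Example (user space, not part of this file): a short sketch of the
 * getsockname() call served by packet_getname() above, used to read back
 * the interface index and hardware address a bound packet socket resolved.
 * The bind step is omitted; fd is assumed to be a bound AF_PACKET socket.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

void show_bound_addr(int fd)
{
	struct sockaddr_ll sll;
	socklen_t len = sizeof(sll);
	int i;

	if (getsockname(fd, (struct sockaddr *)&sll, &len) < 0)
		return;
	printf("ifindex=%d hatype=%u halen=%u addr=", sll.sll_ifindex,
	       (unsigned)sll.sll_hatype, (unsigned)sll.sll_halen);
	/* Cap at sizeof(sll_addr); longer addresses extend past the struct. */
	for (i = 0; i < sll.sll_halen && i < (int)sizeof(sll.sll_addr); i++)
		printf("%02x", sll.sll_addr[i]);
	printf("\n");
}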
2aeb0b88 WC |
3088 | static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, |
3089 | int what) | |
1da177e4 LT |
3090 | { |
3091 | switch (i->type) { | |
3092 | case PACKET_MR_MULTICAST: | |
1162563f JP |
3093 | if (i->alen != dev->addr_len) |
3094 | return -EINVAL; | |
1da177e4 | 3095 | if (what > 0) |
22bedad3 | 3096 | return dev_mc_add(dev, i->addr); |
1da177e4 | 3097 | else |
22bedad3 | 3098 | return dev_mc_del(dev, i->addr); |
1da177e4 LT |
3099 | break; |
3100 | case PACKET_MR_PROMISC: | |
2aeb0b88 | 3101 | return dev_set_promiscuity(dev, what); |
1da177e4 | 3102 | case PACKET_MR_ALLMULTI: |
2aeb0b88 | 3103 | return dev_set_allmulti(dev, what); |
d95ed927 | 3104 | case PACKET_MR_UNICAST: |
1162563f JP |
3105 | if (i->alen != dev->addr_len) |
3106 | return -EINVAL; | |
d95ed927 | 3107 | if (what > 0) |
a748ee24 | 3108 | return dev_uc_add(dev, i->addr); |
d95ed927 | 3109 | else |
a748ee24 | 3110 | return dev_uc_del(dev, i->addr); |
d95ed927 | 3111 | break; |
40d4e3df ED |
3112 | default: |
3113 | break; | |
1da177e4 | 3114 | } |
2aeb0b88 | 3115 | return 0; |
1da177e4 LT |
3116 | } |
3117 | ||
3118 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | |
3119 | { | |
40d4e3df | 3120 | for ( ; i; i = i->next) { |
1da177e4 LT |
3121 | if (i->ifindex == dev->ifindex) |
3122 | packet_dev_mc(dev, i, what); | |
3123 | } | |
3124 | } | |
3125 | ||
0fb375fb | 3126 | static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) |
1da177e4 LT |
3127 | { |
3128 | struct packet_sock *po = pkt_sk(sk); | |
3129 | struct packet_mclist *ml, *i; | |
3130 | struct net_device *dev; | |
3131 | int err; | |
3132 | ||
3133 | rtnl_lock(); | |
3134 | ||
3135 | err = -ENODEV; | |
3b1e0a65 | 3136 | dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); |
1da177e4 LT |
3137 | if (!dev) |
3138 | goto done; | |
3139 | ||
3140 | err = -EINVAL; | |
1162563f | 3141 | if (mreq->mr_alen > dev->addr_len) |
1da177e4 LT |
3142 | goto done; |
3143 | ||
3144 | err = -ENOBUFS; | |
8b3a7005 | 3145 | i = kmalloc(sizeof(*i), GFP_KERNEL); |
1da177e4 LT |
3146 | if (i == NULL) |
3147 | goto done; | |
3148 | ||
3149 | err = 0; | |
3150 | for (ml = po->mclist; ml; ml = ml->next) { | |
3151 | if (ml->ifindex == mreq->mr_ifindex && | |
3152 | ml->type == mreq->mr_type && | |
3153 | ml->alen == mreq->mr_alen && | |
3154 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | |
3155 | ml->count++; | |
3156 | /* Free the new element ... */ | |
3157 | kfree(i); | |
3158 | goto done; | |
3159 | } | |
3160 | } | |
3161 | ||
3162 | i->type = mreq->mr_type; | |
3163 | i->ifindex = mreq->mr_ifindex; | |
3164 | i->alen = mreq->mr_alen; | |
3165 | memcpy(i->addr, mreq->mr_address, i->alen); | |
3166 | i->count = 1; | |
3167 | i->next = po->mclist; | |
3168 | po->mclist = i; | |
2aeb0b88 WC |
3169 | err = packet_dev_mc(dev, i, 1); |
3170 | if (err) { | |
3171 | po->mclist = i->next; | |
3172 | kfree(i); | |
3173 | } | |
1da177e4 LT |
3174 | |
3175 | done: | |
3176 | rtnl_unlock(); | |
3177 | return err; | |
3178 | } | |
3179 | ||
0fb375fb | 3180 | static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) |
1da177e4 LT |
3181 | { |
3182 | struct packet_mclist *ml, **mlp; | |
3183 | ||
3184 | rtnl_lock(); | |
3185 | ||
3186 | for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { | |
3187 | if (ml->ifindex == mreq->mr_ifindex && | |
3188 | ml->type == mreq->mr_type && | |
3189 | ml->alen == mreq->mr_alen && | |
3190 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | |
3191 | if (--ml->count == 0) { | |
3192 | struct net_device *dev; | |
3193 | *mlp = ml->next; | |
ad959e76 ED |
3194 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
3195 | if (dev) | |
1da177e4 | 3196 | packet_dev_mc(dev, ml, -1); |
1da177e4 LT |
3197 | kfree(ml); |
3198 | } | |
3199 | rtnl_unlock(); | |
3200 | return 0; | |
3201 | } | |
3202 | } | |
3203 | rtnl_unlock(); | |
3204 | return -EADDRNOTAVAIL; | |
3205 | } | |
3206 | ||
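/*
 * Example (user space, not part of this file): a small sketch of the
 * PACKET_ADD_MEMBERSHIP path handled above, here used to put one interface
 * into promiscuous mode via PACKET_MR_PROMISC.  The interface name is
 * purely illustrative and error handling is minimal.
 */
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int enable_promisc(const char *ifname)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct packet_mreq mreq;

	if (fd < 0)
		return -1;
	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = if_nametoindex(ifname);
	mreq.mr_type = PACKET_MR_PROMISC;	/* no address needed */

	/* Refcounted in packet_mc_add(); released again when the socket closes. */
	if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep the socket open to keep promiscuous mode */
}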
3207 | static void packet_flush_mclist(struct sock *sk) | |
3208 | { | |
3209 | struct packet_sock *po = pkt_sk(sk); | |
3210 | struct packet_mclist *ml; | |
3211 | ||
3212 | if (!po->mclist) | |
3213 | return; | |
3214 | ||
3215 | rtnl_lock(); | |
3216 | while ((ml = po->mclist) != NULL) { | |
3217 | struct net_device *dev; | |
3218 | ||
3219 | po->mclist = ml->next; | |
ad959e76 ED |
3220 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
3221 | if (dev != NULL) | |
1da177e4 | 3222 | packet_dev_mc(dev, ml, -1); |
1da177e4 LT |
3223 | kfree(ml); |
3224 | } | |
3225 | rtnl_unlock(); | |
3226 | } | |
1da177e4 LT |
3227 | |
3228 | static int | |
b7058842 | 3229 | packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) |
1da177e4 LT |
3230 | { |
3231 | struct sock *sk = sock->sk; | |
8dc41944 | 3232 | struct packet_sock *po = pkt_sk(sk); |
1da177e4 LT |
3233 | int ret; |
3234 | ||
3235 | if (level != SOL_PACKET) | |
3236 | return -ENOPROTOOPT; | |
3237 | ||
69e3c75f | 3238 | switch (optname) { |
1ce4f28b | 3239 | case PACKET_ADD_MEMBERSHIP: |
1da177e4 LT |
3240 | case PACKET_DROP_MEMBERSHIP: |
3241 | { | |
0fb375fb EB |
3242 | struct packet_mreq_max mreq; |
3243 | int len = optlen; | |
3244 | memset(&mreq, 0, sizeof(mreq)); | |
3245 | if (len < sizeof(struct packet_mreq)) | |
1da177e4 | 3246 | return -EINVAL; |
0fb375fb EB |
3247 | if (len > sizeof(mreq)) |
3248 | len = sizeof(mreq); | |
40d4e3df | 3249 | if (copy_from_user(&mreq, optval, len)) |
1da177e4 | 3250 | return -EFAULT; |
0fb375fb EB |
3251 | if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) |
3252 | return -EINVAL; | |
1da177e4 LT |
3253 | if (optname == PACKET_ADD_MEMBERSHIP) |
3254 | ret = packet_mc_add(sk, &mreq); | |
3255 | else | |
3256 | ret = packet_mc_drop(sk, &mreq); | |
3257 | return ret; | |
3258 | } | |
a2efcfa0 | 3259 | |
1da177e4 | 3260 | case PACKET_RX_RING: |
69e3c75f | 3261 | case PACKET_TX_RING: |
1da177e4 | 3262 | { |
f6fb8f10 | 3263 | union tpacket_req_u req_u; |
3264 | int len; | |
1da177e4 | 3265 | |
f6fb8f10 | 3266 | switch (po->tp_version) { |
3267 | case TPACKET_V1: | |
3268 | case TPACKET_V2: | |
3269 | len = sizeof(req_u.req); | |
3270 | break; | |
3271 | case TPACKET_V3: | |
3272 | default: | |
3273 | len = sizeof(req_u.req3); | |
3274 | break; | |
3275 | } | |
3276 | if (optlen < len) | |
1da177e4 | 3277 | return -EINVAL; |
bfd5f4a3 SS |
3278 | if (pkt_sk(sk)->has_vnet_hdr) |
3279 | return -EINVAL; | |
f6fb8f10 | 3280 | if (copy_from_user(&req_u.req, optval, len)) |
1da177e4 | 3281 | return -EFAULT; |
f6fb8f10 | 3282 | return packet_set_ring(sk, &req_u, 0, |
3283 | optname == PACKET_TX_RING); | |
1da177e4 LT |
3284 | } |
3285 | case PACKET_COPY_THRESH: | |
3286 | { | |
3287 | int val; | |
3288 | ||
40d4e3df | 3289 | if (optlen != sizeof(val)) |
1da177e4 | 3290 | return -EINVAL; |
40d4e3df | 3291 | if (copy_from_user(&val, optval, sizeof(val))) |
1da177e4 LT |
3292 | return -EFAULT; |
3293 | ||
3294 | pkt_sk(sk)->copy_thresh = val; | |
3295 | return 0; | |
3296 | } | |
bbd6ef87 PM |
3297 | case PACKET_VERSION: |
3298 | { | |
3299 | int val; | |
3300 | ||
3301 | if (optlen != sizeof(val)) | |
3302 | return -EINVAL; | |
69e3c75f | 3303 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
bbd6ef87 PM |
3304 | return -EBUSY; |
3305 | if (copy_from_user(&val, optval, sizeof(val))) | |
3306 | return -EFAULT; | |
3307 | switch (val) { | |
3308 | case TPACKET_V1: | |
3309 | case TPACKET_V2: | |
f6fb8f10 | 3310 | case TPACKET_V3: |
bbd6ef87 PM |
3311 | po->tp_version = val; |
3312 | return 0; | |
3313 | default: | |
3314 | return -EINVAL; | |
3315 | } | |
3316 | } | |
8913336a PM |
3317 | case PACKET_RESERVE: |
3318 | { | |
3319 | unsigned int val; | |
3320 | ||
3321 | if (optlen != sizeof(val)) | |
3322 | return -EINVAL; | |
69e3c75f | 3323 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
8913336a PM |
3324 | return -EBUSY; |
3325 | if (copy_from_user(&val, optval, sizeof(val))) | |
3326 | return -EFAULT; | |
3327 | po->tp_reserve = val; | |
3328 | return 0; | |
3329 | } | |
69e3c75f JB |
3330 | case PACKET_LOSS: |
3331 | { | |
3332 | unsigned int val; | |
3333 | ||
3334 | if (optlen != sizeof(val)) | |
3335 | return -EINVAL; | |
3336 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3337 | return -EBUSY; | |
3338 | if (copy_from_user(&val, optval, sizeof(val))) | |
3339 | return -EFAULT; | |
3340 | po->tp_loss = !!val; | |
3341 | return 0; | |
3342 | } | |
8dc41944 HX |
3343 | case PACKET_AUXDATA: |
3344 | { | |
3345 | int val; | |
3346 | ||
3347 | if (optlen < sizeof(val)) | |
3348 | return -EINVAL; | |
3349 | if (copy_from_user(&val, optval, sizeof(val))) | |
3350 | return -EFAULT; | |
3351 | ||
3352 | po->auxdata = !!val; | |
3353 | return 0; | |
3354 | } | |
80feaacb PWJ |
3355 | case PACKET_ORIGDEV: |
3356 | { | |
3357 | int val; | |
3358 | ||
3359 | if (optlen < sizeof(val)) | |
3360 | return -EINVAL; | |
3361 | if (copy_from_user(&val, optval, sizeof(val))) | |
3362 | return -EFAULT; | |
3363 | ||
3364 | po->origdev = !!val; | |
3365 | return 0; | |
3366 | } | |
bfd5f4a3 SS |
3367 | case PACKET_VNET_HDR: |
3368 | { | |
3369 | int val; | |
3370 | ||
3371 | if (sock->type != SOCK_RAW) | |
3372 | return -EINVAL; | |
3373 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3374 | return -EBUSY; | |
3375 | if (optlen < sizeof(val)) | |
3376 | return -EINVAL; | |
3377 | if (copy_from_user(&val, optval, sizeof(val))) | |
3378 | return -EFAULT; | |
3379 | ||
3380 | po->has_vnet_hdr = !!val; | |
3381 | return 0; | |
3382 | } | |
614f60fa SM |
3383 | case PACKET_TIMESTAMP: |
3384 | { | |
3385 | int val; | |
3386 | ||
3387 | if (optlen != sizeof(val)) | |
3388 | return -EINVAL; | |
3389 | if (copy_from_user(&val, optval, sizeof(val))) | |
3390 | return -EFAULT; | |
3391 | ||
3392 | po->tp_tstamp = val; | |
3393 | return 0; | |
3394 | } | |
dc99f600 DM |
3395 | case PACKET_FANOUT: |
3396 | { | |
3397 | int val; | |
3398 | ||
3399 | if (optlen != sizeof(val)) | |
3400 | return -EINVAL; | |
3401 | if (copy_from_user(&val, optval, sizeof(val))) | |
3402 | return -EFAULT; | |
3403 | ||
3404 | return fanout_add(sk, val & 0xffff, val >> 16); | |
3405 | } | |
5920cd3a PC |
3406 | case PACKET_TX_HAS_OFF: |
3407 | { | |
3408 | unsigned int val; | |
3409 | ||
3410 | if (optlen != sizeof(val)) | |
3411 | return -EINVAL; | |
3412 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3413 | return -EBUSY; | |
3414 | if (copy_from_user(&val, optval, sizeof(val))) | |
3415 | return -EFAULT; | |
3416 | po->tp_tx_has_off = !!val; | |
3417 | return 0; | |
3418 | } | |
d346a3fa DB |
3419 | case PACKET_QDISC_BYPASS: |
3420 | { | |
3421 | int val; | |
3422 | ||
3423 | if (optlen != sizeof(val)) | |
3424 | return -EINVAL; | |
3425 | if (copy_from_user(&val, optval, sizeof(val))) | |
3426 | return -EFAULT; | |
3427 | ||
3428 | po->xmit = val ? packet_direct_xmit : dev_queue_xmit; | |
3429 | return 0; | |
3430 | } | |
1da177e4 LT |
3431 | default: |
3432 | return -ENOPROTOOPT; | |
3433 | } | |
3434 | } | |
3435 | ||
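/*
 * Example (user space, not part of this file): a rough sketch of the order
 * of setsockopt() calls accepted by packet_setsockopt() above when setting
 * up a TPACKET_V2 receive ring.  The ring geometry below is arbitrary and
 * is only meant to satisfy the sanity checks in packet_set_ring() on a
 * system with 4 KiB pages.
 */
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int setup_rx_ring(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int version = TPACKET_V2;
	struct tpacket_req req = {
		.tp_block_size = 4096,	/* must be a multiple of PAGE_SIZE   */
		.tp_block_nr   = 64,
		.tp_frame_size = 2048,	/* must be TPACKET_ALIGNMENT aligned */
		.tp_frame_nr   = 64 * (4096 / 2048),
	};

	if (fd < 0)
		return -1;
	/* The version must be chosen before any ring exists (-EBUSY otherwise). */
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
		       &version, sizeof(version)) < 0 ||
	    setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}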
3436 | static int packet_getsockopt(struct socket *sock, int level, int optname, | |
3437 | char __user *optval, int __user *optlen) | |
3438 | { | |
3439 | int len; | |
c06fff6e | 3440 | int val, lv = sizeof(val); |
1da177e4 LT |
3441 | struct sock *sk = sock->sk; |
3442 | struct packet_sock *po = pkt_sk(sk); | |
c06fff6e | 3443 | void *data = &val; |
ee80fbf3 | 3444 | union tpacket_stats_u st; |
1da177e4 LT |
3445 | |
3446 | if (level != SOL_PACKET) | |
3447 | return -ENOPROTOOPT; | |
3448 | ||
8ae55f04 KK |
3449 | if (get_user(len, optlen)) |
3450 | return -EFAULT; | |
1da177e4 LT |
3451 | |
3452 | if (len < 0) | |
3453 | return -EINVAL; | |
1ce4f28b | 3454 | |
69e3c75f | 3455 | switch (optname) { |
1da177e4 | 3456 | case PACKET_STATISTICS: |
1da177e4 | 3457 | spin_lock_bh(&sk->sk_receive_queue.lock); |
ee80fbf3 DB |
3458 | memcpy(&st, &po->stats, sizeof(st)); |
3459 | memset(&po->stats, 0, sizeof(po->stats)); | |
3460 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
3461 | ||
f6fb8f10 | 3462 | if (po->tp_version == TPACKET_V3) { |
c06fff6e | 3463 | lv = sizeof(struct tpacket_stats_v3); |
8bcdeaff | 3464 | st.stats3.tp_packets += st.stats3.tp_drops; |
ee80fbf3 | 3465 | data = &st.stats3; |
f6fb8f10 | 3466 | } else { |
c06fff6e | 3467 | lv = sizeof(struct tpacket_stats); |
8bcdeaff | 3468 | st.stats1.tp_packets += st.stats1.tp_drops; |
ee80fbf3 | 3469 | data = &st.stats1; |
f6fb8f10 | 3470 | } |
ee80fbf3 | 3471 | |
8dc41944 HX |
3472 | break; |
3473 | case PACKET_AUXDATA: | |
8dc41944 | 3474 | val = po->auxdata; |
80feaacb PWJ |
3475 | break; |
3476 | case PACKET_ORIGDEV: | |
80feaacb | 3477 | val = po->origdev; |
bfd5f4a3 SS |
3478 | break; |
3479 | case PACKET_VNET_HDR: | |
bfd5f4a3 | 3480 | val = po->has_vnet_hdr; |
1da177e4 | 3481 | break; |
bbd6ef87 | 3482 | case PACKET_VERSION: |
bbd6ef87 | 3483 | val = po->tp_version; |
bbd6ef87 PM |
3484 | break; |
3485 | case PACKET_HDRLEN: | |
3486 | if (len > sizeof(int)) | |
3487 | len = sizeof(int); | |
3488 | if (copy_from_user(&val, optval, len)) | |
3489 | return -EFAULT; | |
3490 | switch (val) { | |
3491 | case TPACKET_V1: | |
3492 | val = sizeof(struct tpacket_hdr); | |
3493 | break; | |
3494 | case TPACKET_V2: | |
3495 | val = sizeof(struct tpacket2_hdr); | |
3496 | break; | |
f6fb8f10 | 3497 | case TPACKET_V3: |
3498 | val = sizeof(struct tpacket3_hdr); | |
3499 | break; | |
bbd6ef87 PM |
3500 | default: |
3501 | return -EINVAL; | |
3502 | } | |
bbd6ef87 | 3503 | break; |
8913336a | 3504 | case PACKET_RESERVE: |
8913336a | 3505 | val = po->tp_reserve; |
8913336a | 3506 | break; |
69e3c75f | 3507 | case PACKET_LOSS: |
69e3c75f | 3508 | val = po->tp_loss; |
69e3c75f | 3509 | break; |
614f60fa | 3510 | case PACKET_TIMESTAMP: |
614f60fa | 3511 | val = po->tp_tstamp; |
614f60fa | 3512 | break; |
dc99f600 | 3513 | case PACKET_FANOUT: |
dc99f600 DM |
3514 | val = (po->fanout ? |
3515 | ((u32)po->fanout->id | | |
77f65ebd WB |
3516 | ((u32)po->fanout->type << 16) | |
3517 | ((u32)po->fanout->flags << 24)) : | |
dc99f600 | 3518 | 0); |
dc99f600 | 3519 | break; |
5920cd3a PC |
3520 | case PACKET_TX_HAS_OFF: |
3521 | val = po->tp_tx_has_off; | |
3522 | break; | |
d346a3fa DB |
3523 | case PACKET_QDISC_BYPASS: |
3524 | val = packet_use_direct_xmit(po); | |
3525 | break; | |
1da177e4 LT |
3526 | default: |
3527 | return -ENOPROTOOPT; | |
3528 | } | |
3529 | ||
c06fff6e ED |
3530 | if (len > lv) |
3531 | len = lv; | |
8ae55f04 KK |
3532 | if (put_user(len, optlen)) |
3533 | return -EFAULT; | |
8dc41944 HX |
3534 | if (copy_to_user(optval, data, len)) |
3535 | return -EFAULT; | |
8ae55f04 | 3536 | return 0; |
1da177e4 LT |
3537 | } |
3538 | ||
3539 | ||
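/*
 * Example (user space, not part of this file): a minimal sketch of reading
 * the counters that packet_getsockopt() exposes above.  It assumes a socket
 * left at the default TPACKET_V1/V2 layout, so a plain struct tpacket_stats
 * is returned; note that the kernel zeroes the counters on every read.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

void print_sock_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
}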
351638e7 JP |
3540 | static int packet_notifier(struct notifier_block *this, |
3541 | unsigned long msg, void *ptr) | |
1da177e4 LT |
3542 | { |
3543 | struct sock *sk; | |
351638e7 | 3544 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
c346dca1 | 3545 | struct net *net = dev_net(dev); |
1da177e4 | 3546 | |
808f5114 | 3547 | rcu_read_lock(); |
b67bfe0d | 3548 | sk_for_each_rcu(sk, &net->packet.sklist) { |
1da177e4 LT |
3549 | struct packet_sock *po = pkt_sk(sk); |
3550 | ||
3551 | switch (msg) { | |
3552 | case NETDEV_UNREGISTER: | |
1da177e4 LT |
3553 | if (po->mclist) |
3554 | packet_dev_mclist(dev, po->mclist, -1); | |
a2efcfa0 DM |
3555 | /* fallthrough */ |
3556 | ||
1da177e4 LT |
3557 | case NETDEV_DOWN: |
3558 | if (dev->ifindex == po->ifindex) { | |
3559 | spin_lock(&po->bind_lock); | |
3560 | if (po->running) { | |
ce06b03e | 3561 | __unregister_prot_hook(sk, false); |
1da177e4 LT |
3562 | sk->sk_err = ENETDOWN; |
3563 | if (!sock_flag(sk, SOCK_DEAD)) | |
3564 | sk->sk_error_report(sk); | |
3565 | } | |
3566 | if (msg == NETDEV_UNREGISTER) { | |
66e56cd4 | 3567 | packet_cached_dev_reset(po); |
1da177e4 | 3568 | po->ifindex = -1; |
160ff18a BG |
3569 | if (po->prot_hook.dev) |
3570 | dev_put(po->prot_hook.dev); | |
1da177e4 LT |
3571 | po->prot_hook.dev = NULL; |
3572 | } | |
3573 | spin_unlock(&po->bind_lock); | |
3574 | } | |
3575 | break; | |
3576 | case NETDEV_UP: | |
808f5114 | 3577 | if (dev->ifindex == po->ifindex) { |
3578 | spin_lock(&po->bind_lock); | |
ce06b03e DM |
3579 | if (po->num) |
3580 | register_prot_hook(sk); | |
808f5114 | 3581 | spin_unlock(&po->bind_lock); |
1da177e4 | 3582 | } |
1da177e4 LT |
3583 | break; |
3584 | } | |
3585 | } | |
808f5114 | 3586 | rcu_read_unlock(); |
1da177e4 LT |
3587 | return NOTIFY_DONE; |
3588 | } | |
3589 | ||
3590 | ||
3591 | static int packet_ioctl(struct socket *sock, unsigned int cmd, | |
3592 | unsigned long arg) | |
3593 | { | |
3594 | struct sock *sk = sock->sk; | |
3595 | ||
69e3c75f | 3596 | switch (cmd) { |
40d4e3df ED |
3597 | case SIOCOUTQ: |
3598 | { | |
3599 | int amount = sk_wmem_alloc_get(sk); | |
31e6d363 | 3600 | |
40d4e3df ED |
3601 | return put_user(amount, (int __user *)arg); |
3602 | } | |
3603 | case SIOCINQ: | |
3604 | { | |
3605 | struct sk_buff *skb; | |
3606 | int amount = 0; | |
3607 | ||
3608 | spin_lock_bh(&sk->sk_receive_queue.lock); | |
3609 | skb = skb_peek(&sk->sk_receive_queue); | |
3610 | if (skb) | |
3611 | amount = skb->len; | |
3612 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
3613 | return put_user(amount, (int __user *)arg); | |
3614 | } | |
3615 | case SIOCGSTAMP: | |
3616 | return sock_get_timestamp(sk, (struct timeval __user *)arg); | |
3617 | case SIOCGSTAMPNS: | |
3618 | return sock_get_timestampns(sk, (struct timespec __user *)arg); | |
1ce4f28b | 3619 | |
1da177e4 | 3620 | #ifdef CONFIG_INET |
40d4e3df ED |
3621 | case SIOCADDRT: |
3622 | case SIOCDELRT: | |
3623 | case SIOCDARP: | |
3624 | case SIOCGARP: | |
3625 | case SIOCSARP: | |
3626 | case SIOCGIFADDR: | |
3627 | case SIOCSIFADDR: | |
3628 | case SIOCGIFBRDADDR: | |
3629 | case SIOCSIFBRDADDR: | |
3630 | case SIOCGIFNETMASK: | |
3631 | case SIOCSIFNETMASK: | |
3632 | case SIOCGIFDSTADDR: | |
3633 | case SIOCSIFDSTADDR: | |
3634 | case SIOCSIFFLAGS: | |
40d4e3df | 3635 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
1da177e4 LT |
3636 | #endif |
3637 | ||
40d4e3df ED |
3638 | default: |
3639 | return -ENOIOCTLCMD; | |
1da177e4 LT |
3640 | } |
3641 | return 0; | |
3642 | } | |
3643 | ||
40d4e3df | 3644 | static unsigned int packet_poll(struct file *file, struct socket *sock, |
1da177e4 LT |
3645 | poll_table *wait) |
3646 | { | |
3647 | struct sock *sk = sock->sk; | |
3648 | struct packet_sock *po = pkt_sk(sk); | |
3649 | unsigned int mask = datagram_poll(file, sock, wait); | |
3650 | ||
3651 | spin_lock_bh(&sk->sk_receive_queue.lock); | |
69e3c75f | 3652 | if (po->rx_ring.pg_vec) { |
f6fb8f10 | 3653 | if (!packet_previous_rx_frame(po, &po->rx_ring, |
3654 | TP_STATUS_KERNEL)) | |
1da177e4 LT |
3655 | mask |= POLLIN | POLLRDNORM; |
3656 | } | |
3657 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
69e3c75f JB |
3658 | spin_lock_bh(&sk->sk_write_queue.lock); |
3659 | if (po->tx_ring.pg_vec) { | |
3660 | if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) | |
3661 | mask |= POLLOUT | POLLWRNORM; | |
3662 | } | |
3663 | spin_unlock_bh(&sk->sk_write_queue.lock); | |
1da177e4 LT |
3664 | return mask; |
3665 | } | |
3666 | ||
3667 | ||
3668 | /* Dirty? Well, I still have not learned a better way to account |
3669 | * for user mmaps. |
3670 | */ | |
3671 | ||
3672 | static void packet_mm_open(struct vm_area_struct *vma) | |
3673 | { | |
3674 | struct file *file = vma->vm_file; | |
40d4e3df | 3675 | struct socket *sock = file->private_data; |
1da177e4 | 3676 | struct sock *sk = sock->sk; |
1ce4f28b | 3677 | |
1da177e4 LT |
3678 | if (sk) |
3679 | atomic_inc(&pkt_sk(sk)->mapped); | |
3680 | } | |
3681 | ||
3682 | static void packet_mm_close(struct vm_area_struct *vma) | |
3683 | { | |
3684 | struct file *file = vma->vm_file; | |
40d4e3df | 3685 | struct socket *sock = file->private_data; |
1da177e4 | 3686 | struct sock *sk = sock->sk; |
1ce4f28b | 3687 | |
1da177e4 LT |
3688 | if (sk) |
3689 | atomic_dec(&pkt_sk(sk)->mapped); | |
3690 | } | |
3691 | ||
f0f37e2f | 3692 | static const struct vm_operations_struct packet_mmap_ops = { |
40d4e3df ED |
3693 | .open = packet_mm_open, |
3694 | .close = packet_mm_close, | |
1da177e4 LT |
3695 | }; |
3696 | ||
0e3125c7 NH |
3697 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
3698 | unsigned int len) | |
1da177e4 LT |
3699 | { |
3700 | int i; | |
3701 | ||
4ebf0ae2 | 3702 | for (i = 0; i < len; i++) { |
0e3125c7 | 3703 | if (likely(pg_vec[i].buffer)) { |
c56b4d90 | 3704 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
0e3125c7 NH |
3705 | vfree(pg_vec[i].buffer); |
3706 | else | |
3707 | free_pages((unsigned long)pg_vec[i].buffer, | |
3708 | order); | |
3709 | pg_vec[i].buffer = NULL; | |
3710 | } | |
1da177e4 LT |
3711 | } |
3712 | kfree(pg_vec); | |
3713 | } | |
3714 | ||
eea49cc9 | 3715 | static char *alloc_one_pg_vec_page(unsigned long order) |
4ebf0ae2 | 3716 | { |
f0d4eb29 | 3717 | char *buffer; |
0e3125c7 NH |
3718 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | |
3719 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; | |
3720 | ||
3721 | buffer = (char *) __get_free_pages(gfp_flags, order); | |
0e3125c7 NH |
3722 | if (buffer) |
3723 | return buffer; | |
3724 | ||
f0d4eb29 | 3725 | /* __get_free_pages failed, fall back to vmalloc */ |
bbce5a59 | 3726 | buffer = vzalloc((1 << order) * PAGE_SIZE); |
0e3125c7 NH |
3727 | if (buffer) |
3728 | return buffer; | |
3729 | ||
f0d4eb29 | 3730 | /* vmalloc failed, let's dig into swap here */ |
0e3125c7 | 3731 | gfp_flags &= ~__GFP_NORETRY; |
f0d4eb29 | 3732 | buffer = (char *) __get_free_pages(gfp_flags, order); |
0e3125c7 NH |
3733 | if (buffer) |
3734 | return buffer; | |
3735 | ||
f0d4eb29 | 3736 | /* complete and utter failure */ |
0e3125c7 | 3737 | return NULL; |
4ebf0ae2 DM |
3738 | } |
3739 | ||
0e3125c7 | 3740 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
4ebf0ae2 DM |
3741 | { |
3742 | unsigned int block_nr = req->tp_block_nr; | |
0e3125c7 | 3743 | struct pgv *pg_vec; |
4ebf0ae2 DM |
3744 | int i; |
3745 | ||
0e3125c7 | 3746 | pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); |
4ebf0ae2 DM |
3747 | if (unlikely(!pg_vec)) |
3748 | goto out; | |
3749 | ||
3750 | for (i = 0; i < block_nr; i++) { | |
c56b4d90 | 3751 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
0e3125c7 | 3752 | if (unlikely(!pg_vec[i].buffer)) |
4ebf0ae2 DM |
3753 | goto out_free_pgvec; |
3754 | } | |
3755 | ||
3756 | out: | |
3757 | return pg_vec; | |
3758 | ||
3759 | out_free_pgvec: | |
3760 | free_pg_vec(pg_vec, order, block_nr); | |
3761 | pg_vec = NULL; | |
3762 | goto out; | |
3763 | } | |
1da177e4 | 3764 | |
f6fb8f10 | 3765 | static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
69e3c75f | 3766 | int closing, int tx_ring) |
1da177e4 | 3767 | { |
0e3125c7 | 3768 | struct pgv *pg_vec = NULL; |
1da177e4 | 3769 | struct packet_sock *po = pkt_sk(sk); |
0e11c91e | 3770 | int was_running, order = 0; |
69e3c75f JB |
3771 | struct packet_ring_buffer *rb; |
3772 | struct sk_buff_head *rb_queue; | |
0e11c91e | 3773 | __be16 num; |
f6fb8f10 | 3774 | int err = -EINVAL; |
3775 | /* Alias added to keep code churn minimal */ |
3776 | struct tpacket_req *req = &req_u->req; | |
3777 | ||
3778 | /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ | |
3779 | if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { | |
3780 | WARN(1, "Tx-ring is not supported.\n"); | |
3781 | goto out; | |
3782 | } | |
1ce4f28b | 3783 | |
69e3c75f JB |
3784 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; |
3785 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; | |
1da177e4 | 3786 | |
69e3c75f JB |
3787 | err = -EBUSY; |
3788 | if (!closing) { | |
3789 | if (atomic_read(&po->mapped)) | |
3790 | goto out; | |
b0138408 | 3791 | if (packet_read_pending(rb)) |
69e3c75f JB |
3792 | goto out; |
3793 | } | |
1da177e4 | 3794 | |
69e3c75f JB |
3795 | if (req->tp_block_nr) { |
3796 | /* Sanity tests and some calculations */ | |
3797 | err = -EBUSY; | |
3798 | if (unlikely(rb->pg_vec)) | |
3799 | goto out; | |
1da177e4 | 3800 | |
bbd6ef87 PM |
3801 | switch (po->tp_version) { |
3802 | case TPACKET_V1: | |
3803 | po->tp_hdrlen = TPACKET_HDRLEN; | |
3804 | break; | |
3805 | case TPACKET_V2: | |
3806 | po->tp_hdrlen = TPACKET2_HDRLEN; | |
3807 | break; | |
f6fb8f10 | 3808 | case TPACKET_V3: |
3809 | po->tp_hdrlen = TPACKET3_HDRLEN; | |
3810 | break; | |
bbd6ef87 PM |
3811 | } |
3812 | ||
69e3c75f | 3813 | err = -EINVAL; |
4ebf0ae2 | 3814 | if (unlikely((int)req->tp_block_size <= 0)) |
69e3c75f | 3815 | goto out; |
4ebf0ae2 | 3816 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
69e3c75f | 3817 | goto out; |
dc808110 ED |
3818 | if (po->tp_version >= TPACKET_V3 && |
3819 | (int)(req->tp_block_size - | |
3820 | BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) | |
3821 | goto out; | |
8913336a | 3822 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
69e3c75f JB |
3823 | po->tp_reserve)) |
3824 | goto out; | |
4ebf0ae2 | 3825 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) |
69e3c75f | 3826 | goto out; |
1da177e4 | 3827 | |
69e3c75f JB |
3828 | rb->frames_per_block = req->tp_block_size/req->tp_frame_size; |
3829 | if (unlikely(rb->frames_per_block <= 0)) | |
3830 | goto out; | |
3831 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != | |
3832 | req->tp_frame_nr)) | |
3833 | goto out; | |
1da177e4 LT |
3834 | |
3835 | err = -ENOMEM; | |
4ebf0ae2 DM |
3836 | order = get_order(req->tp_block_size); |
3837 | pg_vec = alloc_pg_vec(req, order); | |
3838 | if (unlikely(!pg_vec)) | |
1da177e4 | 3839 | goto out; |
f6fb8f10 | 3840 | switch (po->tp_version) { |
3841 | case TPACKET_V3: | |
3842 | /* Transmit path is not supported. We checked |
3843 | * this above, but be paranoid here as well. |
3844 | */ | |
3845 | if (!tx_ring) | |
3846 | init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); | |
d7cf0c34 | 3847 | break; |
f6fb8f10 | 3848 | default: |
3849 | break; | |
3850 | } | |
69e3c75f JB |
3851 | } |
3852 | /* Done */ | |
3853 | else { | |
3854 | err = -EINVAL; | |
4ebf0ae2 | 3855 | if (unlikely(req->tp_frame_nr)) |
69e3c75f | 3856 | goto out; |
1da177e4 LT |
3857 | } |
3858 | ||
3859 | lock_sock(sk); | |
3860 | ||
3861 | /* Detach socket from network */ | |
3862 | spin_lock(&po->bind_lock); | |
3863 | was_running = po->running; | |
3864 | num = po->num; | |
3865 | if (was_running) { | |
1da177e4 | 3866 | po->num = 0; |
ce06b03e | 3867 | __unregister_prot_hook(sk, false); |
1da177e4 LT |
3868 | } |
3869 | spin_unlock(&po->bind_lock); | |
1ce4f28b | 3870 | |
1da177e4 LT |
3871 | synchronize_net(); |
3872 | ||
3873 | err = -EBUSY; | |
905db440 | 3874 | mutex_lock(&po->pg_vec_lock); |
1da177e4 LT |
3875 | if (closing || atomic_read(&po->mapped) == 0) { |
3876 | err = 0; | |
69e3c75f | 3877 | spin_lock_bh(&rb_queue->lock); |
c053fd96 | 3878 | swap(rb->pg_vec, pg_vec); |
69e3c75f JB |
3879 | rb->frame_max = (req->tp_frame_nr - 1); |
3880 | rb->head = 0; | |
3881 | rb->frame_size = req->tp_frame_size; | |
3882 | spin_unlock_bh(&rb_queue->lock); | |
3883 | ||
c053fd96 CG |
3884 | swap(rb->pg_vec_order, order); |
3885 | swap(rb->pg_vec_len, req->tp_block_nr); | |
69e3c75f JB |
3886 | |
3887 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; | |
3888 | po->prot_hook.func = (po->rx_ring.pg_vec) ? | |
3889 | tpacket_rcv : packet_rcv; | |
3890 | skb_queue_purge(rb_queue); | |
1da177e4 | 3891 | if (atomic_read(&po->mapped)) |
40d4e3df ED |
3892 | pr_err("packet_mmap: vma is busy: %d\n", |
3893 | atomic_read(&po->mapped)); | |
1da177e4 | 3894 | } |
905db440 | 3895 | mutex_unlock(&po->pg_vec_lock); |
1da177e4 LT |
3896 | |
3897 | spin_lock(&po->bind_lock); | |
ce06b03e | 3898 | if (was_running) { |
1da177e4 | 3899 | po->num = num; |
ce06b03e | 3900 | register_prot_hook(sk); |
1da177e4 LT |
3901 | } |
3902 | spin_unlock(&po->bind_lock); | |
f6fb8f10 | 3903 | if (closing && (po->tp_version > TPACKET_V2)) { |
3904 | /* Because we don't support block-based V3 on tx-ring */ | |
3905 | if (!tx_ring) | |
3906 | prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue); | |
3907 | } | |
1da177e4 LT |
3908 | release_sock(sk); |
3909 | ||
1da177e4 LT |
3910 | if (pg_vec) |
3911 | free_pg_vec(pg_vec, order, req->tp_block_nr); | |
3912 | out: | |
3913 | return err; | |
3914 | } | |
3915 | ||
69e3c75f JB |
3916 | static int packet_mmap(struct file *file, struct socket *sock, |
3917 | struct vm_area_struct *vma) | |
1da177e4 LT |
3918 | { |
3919 | struct sock *sk = sock->sk; | |
3920 | struct packet_sock *po = pkt_sk(sk); | |
69e3c75f JB |
3921 | unsigned long size, expected_size; |
3922 | struct packet_ring_buffer *rb; | |
1da177e4 LT |
3923 | unsigned long start; |
3924 | int err = -EINVAL; | |
3925 | int i; | |
3926 | ||
3927 | if (vma->vm_pgoff) | |
3928 | return -EINVAL; | |
3929 | ||
905db440 | 3930 | mutex_lock(&po->pg_vec_lock); |
69e3c75f JB |
3931 | |
3932 | expected_size = 0; | |
3933 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { | |
3934 | if (rb->pg_vec) { | |
3935 | expected_size += rb->pg_vec_len | |
3936 | * rb->pg_vec_pages | |
3937 | * PAGE_SIZE; | |
3938 | } | |
3939 | } | |
3940 | ||
3941 | if (expected_size == 0) | |
1da177e4 | 3942 | goto out; |
69e3c75f JB |
3943 | |
3944 | size = vma->vm_end - vma->vm_start; | |
3945 | if (size != expected_size) | |
1da177e4 LT |
3946 | goto out; |
3947 | ||
1da177e4 | 3948 | start = vma->vm_start; |
69e3c75f JB |
3949 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { |
3950 | if (rb->pg_vec == NULL) | |
3951 | continue; | |
3952 | ||
3953 | for (i = 0; i < rb->pg_vec_len; i++) { | |
0e3125c7 NH |
3954 | struct page *page; |
3955 | void *kaddr = rb->pg_vec[i].buffer; | |
69e3c75f JB |
3956 | int pg_num; |
3957 | ||
c56b4d90 CG |
3958 | for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { |
3959 | page = pgv_to_page(kaddr); | |
69e3c75f JB |
3960 | err = vm_insert_page(vma, start, page); |
3961 | if (unlikely(err)) | |
3962 | goto out; | |
3963 | start += PAGE_SIZE; | |
0e3125c7 | 3964 | kaddr += PAGE_SIZE; |
69e3c75f | 3965 | } |
4ebf0ae2 | 3966 | } |
1da177e4 | 3967 | } |
69e3c75f | 3968 | |
4ebf0ae2 | 3969 | atomic_inc(&po->mapped); |
1da177e4 LT |
3970 | vma->vm_ops = &packet_mmap_ops; |
3971 | err = 0; | |
3972 | ||
3973 | out: | |
905db440 | 3974 | mutex_unlock(&po->pg_vec_lock); |
1da177e4 LT |
3975 | return err; |
3976 | } | |
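/*
 * Example (user space, not part of this file): a sketch of how the rx ring
 * created by packet_set_ring() is typically mapped via packet_mmap() above
 * and then walked.  It assumes a TPACKET_V2 ring set up as in the earlier
 * sketch (64 blocks of 4096 bytes, 2048-byte frames) and does no real
 * packet parsing.
 */
#include <poll.h>
#include <stdio.h>
#include <sys/mman.h>
#include <linux/if_packet.h>

void rx_loop(int fd)
{
	const unsigned int frame_size = 2048, frame_nr = 128;
	size_t map_len = (size_t)frame_size * frame_nr;
	unsigned int i = 0;
	char *ring;

	/* One contiguous mapping covers the whole ring (rx, then tx if any). */
	ring = mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		return;

	for (;;) {
		struct tpacket2_hdr *hdr =
			(struct tpacket2_hdr *)(ring + (size_t)i * frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			struct pollfd pfd = { .fd = fd, .events = POLLIN };

			poll(&pfd, 1, -1);	/* wait for the kernel to fill a frame */
			continue;
		}
		printf("frame %u: %u bytes\n", i, hdr->tp_snaplen);

		hdr->tp_status = TP_STATUS_KERNEL;	/* hand the frame back */
		i = (i + 1) % frame_nr;
	}
}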
1da177e4 | 3977 | |
90ddc4f0 | 3978 | static const struct proto_ops packet_ops_spkt = { |
1da177e4 LT |
3979 | .family = PF_PACKET, |
3980 | .owner = THIS_MODULE, | |
3981 | .release = packet_release, | |
3982 | .bind = packet_bind_spkt, | |
3983 | .connect = sock_no_connect, | |
3984 | .socketpair = sock_no_socketpair, | |
3985 | .accept = sock_no_accept, | |
3986 | .getname = packet_getname_spkt, | |
3987 | .poll = datagram_poll, | |
3988 | .ioctl = packet_ioctl, | |
3989 | .listen = sock_no_listen, | |
3990 | .shutdown = sock_no_shutdown, | |
3991 | .setsockopt = sock_no_setsockopt, | |
3992 | .getsockopt = sock_no_getsockopt, | |
3993 | .sendmsg = packet_sendmsg_spkt, | |
3994 | .recvmsg = packet_recvmsg, | |
3995 | .mmap = sock_no_mmap, | |
3996 | .sendpage = sock_no_sendpage, | |
3997 | }; | |
1da177e4 | 3998 | |
90ddc4f0 | 3999 | static const struct proto_ops packet_ops = { |
1da177e4 LT |
4000 | .family = PF_PACKET, |
4001 | .owner = THIS_MODULE, | |
4002 | .release = packet_release, | |
4003 | .bind = packet_bind, | |
4004 | .connect = sock_no_connect, | |
4005 | .socketpair = sock_no_socketpair, | |
4006 | .accept = sock_no_accept, | |
1ce4f28b | 4007 | .getname = packet_getname, |
1da177e4 LT |
4008 | .poll = packet_poll, |
4009 | .ioctl = packet_ioctl, | |
4010 | .listen = sock_no_listen, | |
4011 | .shutdown = sock_no_shutdown, | |
4012 | .setsockopt = packet_setsockopt, | |
4013 | .getsockopt = packet_getsockopt, | |
4014 | .sendmsg = packet_sendmsg, | |
4015 | .recvmsg = packet_recvmsg, | |
4016 | .mmap = packet_mmap, | |
4017 | .sendpage = sock_no_sendpage, | |
4018 | }; | |
4019 | ||
ec1b4cf7 | 4020 | static const struct net_proto_family packet_family_ops = { |
1da177e4 LT |
4021 | .family = PF_PACKET, |
4022 | .create = packet_create, | |
4023 | .owner = THIS_MODULE, | |
4024 | }; | |
4025 | ||
4026 | static struct notifier_block packet_netdev_notifier = { | |
40d4e3df | 4027 | .notifier_call = packet_notifier, |
1da177e4 LT |
4028 | }; |
4029 | ||
4030 | #ifdef CONFIG_PROC_FS | |
1da177e4 LT |
4031 | |
4032 | static void *packet_seq_start(struct seq_file *seq, loff_t *pos) | |
808f5114 | 4033 | __acquires(RCU) |
1da177e4 | 4034 | { |
e372c414 | 4035 | struct net *net = seq_file_net(seq); |
808f5114 | 4036 | |
4037 | rcu_read_lock(); | |
4038 | return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); | |
1da177e4 LT |
4039 | } |
4040 | ||
4041 | static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
4042 | { | |
1bf40954 | 4043 | struct net *net = seq_file_net(seq); |
808f5114 | 4044 | return seq_hlist_next_rcu(v, &net->packet.sklist, pos); |
1da177e4 LT |
4045 | } |
4046 | ||
4047 | static void packet_seq_stop(struct seq_file *seq, void *v) | |
808f5114 | 4048 | __releases(RCU) |
1da177e4 | 4049 | { |
808f5114 | 4050 | rcu_read_unlock(); |
1da177e4 LT |
4051 | } |
4052 | ||
1ce4f28b | 4053 | static int packet_seq_show(struct seq_file *seq, void *v) |
1da177e4 LT |
4054 | { |
4055 | if (v == SEQ_START_TOKEN) | |
4056 | seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); | |
4057 | else { | |
b7ceabd9 | 4058 | struct sock *s = sk_entry(v); |
1da177e4 LT |
4059 | const struct packet_sock *po = pkt_sk(s); |
4060 | ||
4061 | seq_printf(seq, | |
71338aa7 | 4062 | "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", |
1da177e4 LT |
4063 | s, |
4064 | atomic_read(&s->sk_refcnt), | |
4065 | s->sk_type, | |
4066 | ntohs(po->num), | |
4067 | po->ifindex, | |
4068 | po->running, | |
4069 | atomic_read(&s->sk_rmem_alloc), | |
a7cb5a49 | 4070 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), |
40d4e3df | 4071 | sock_i_ino(s)); |
1da177e4 LT |
4072 | } |
4073 | ||
4074 | return 0; | |
4075 | } | |
4076 | ||
56b3d975 | 4077 | static const struct seq_operations packet_seq_ops = { |
1da177e4 LT |
4078 | .start = packet_seq_start, |
4079 | .next = packet_seq_next, | |
4080 | .stop = packet_seq_stop, | |
4081 | .show = packet_seq_show, | |
4082 | }; | |
4083 | ||
4084 | static int packet_seq_open(struct inode *inode, struct file *file) | |
4085 | { | |
e372c414 DL |
4086 | return seq_open_net(inode, file, &packet_seq_ops, |
4087 | sizeof(struct seq_net_private)); | |
1da177e4 LT |
4088 | } |
4089 | ||
da7071d7 | 4090 | static const struct file_operations packet_seq_fops = { |
1da177e4 LT |
4091 | .owner = THIS_MODULE, |
4092 | .open = packet_seq_open, | |
4093 | .read = seq_read, | |
4094 | .llseek = seq_lseek, | |
e372c414 | 4095 | .release = seq_release_net, |
1da177e4 LT |
4096 | }; |
4097 | ||
4098 | #endif | |
4099 | ||
2c8c1e72 | 4100 | static int __net_init packet_net_init(struct net *net) |
d12d01d6 | 4101 | { |
0fa7fa98 | 4102 | mutex_init(&net->packet.sklist_lock); |
2aaef4e4 | 4103 | INIT_HLIST_HEAD(&net->packet.sklist); |
d12d01d6 | 4104 | |
d4beaa66 | 4105 | if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops)) |
d12d01d6 DL |
4106 | return -ENOMEM; |
4107 | ||
4108 | return 0; | |
4109 | } | |
4110 | ||
2c8c1e72 | 4111 | static void __net_exit packet_net_exit(struct net *net) |
d12d01d6 | 4112 | { |
ece31ffd | 4113 | remove_proc_entry("packet", net->proc_net); |
d12d01d6 DL |
4114 | } |
4115 | ||
4116 | static struct pernet_operations packet_net_ops = { | |
4117 | .init = packet_net_init, | |
4118 | .exit = packet_net_exit, | |
4119 | }; | |
4120 | ||
4121 | ||
1da177e4 LT |
4122 | static void __exit packet_exit(void) |
4123 | { | |
1da177e4 | 4124 | unregister_netdevice_notifier(&packet_netdev_notifier); |
d12d01d6 | 4125 | unregister_pernet_subsys(&packet_net_ops); |
1da177e4 LT |
4126 | sock_unregister(PF_PACKET); |
4127 | proto_unregister(&packet_proto); | |
4128 | } | |
4129 | ||
4130 | static int __init packet_init(void) | |
4131 | { | |
4132 | int rc = proto_register(&packet_proto, 0); | |
4133 | ||
4134 | if (rc != 0) | |
4135 | goto out; | |
4136 | ||
4137 | sock_register(&packet_family_ops); | |
d12d01d6 | 4138 | register_pernet_subsys(&packet_net_ops); |
1da177e4 | 4139 | register_netdevice_notifier(&packet_netdev_notifier); |
1da177e4 LT |
4140 | out: |
4141 | return rc; | |
4142 | } | |
4143 | ||
4144 | module_init(packet_init); | |
4145 | module_exit(packet_exit); | |
4146 | MODULE_LICENSE("GPL"); | |
4147 | MODULE_ALIAS_NETPROTO(PF_PACKET); |