/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

#include "internal.h"

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel), others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to ll
		 header. PPP does this, which is wrong, because it
		 introduces asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
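
/* Illustrative user-space sketch (not part of this file): a membership is
 * joined via setsockopt() using struct packet_mreq; packet_mreq_max above
 * only widens mr_address so the kernel can copy in longer hardware
 * addresses:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */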

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

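/* Ring memory is allocated either with the page allocator or with
 * vmalloc() (see PGV_FROM_VMALLOC above), so translating a ring address
 * to its struct page has to check which case applies.
 */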
static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

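	/* Make the status store above visible before the frame is handed
	 * over; pairs with the smp_rmb() in __packet_get_status().
	 */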
	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

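/* Map a frame index onto its address inside the pg_vec and hand the
 * frame back only if its status matches the one the caller expects.
 */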
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

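/* Derive a block-retire timeout (in msecs) from the link speed and the
 * block size, for when the user did not supply tp_retire_blk_tov.
 *
 * Worked example (illustrative): a 1MB block on a 1Gbps link gives
 * mbits = (1048576 * 8) / (1024 * 1024) = 8, div = 1000 / 1000 = 1,
 * so tmo = 8 * 1 + 1 = 9 msecs, matching the ~8 ms fill time noted in
 * the timer logic comment below.
 */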
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
		int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
		union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. The queue was frozen, user-space
				 * caught up, now the link went idle && the
				 * timer fired. We don't have a block to close,
				 * so we open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {

		/* We could have just memset this but we will lose the
		 * flexibility of making the priv area sticky
		 */
		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
		BLOCK_NUM_PKTS(pbd1) = 0;
		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		getnstimeofday(&ts);
		h1->ts_first_pkt.ts_sec = ts.tv_sec;
		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
		pkc1->pkblk_start = (char *)pbd1;
		pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
		pbd1->version = pkc1->version;
		pkc1->prev = pkc1->nxt_offset;
		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
		prb_thaw_queue(pkc1);
		_prb_refresh_rx_retire_blk_timer(pkc1);

		smp_wmb();

		return;
	}

	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
	dump_stack();
	BUG();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}

	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
	dump_stack();
	BUG();
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
		struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
			struct tpacket_kbdq_core *pkc,
			struct tpacket_block_desc *pbd,
			unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if the last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	bool has_room;

	if (po->prot_hook.func != tpacket_rcv)
		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
			<= sk->sk_rcvbuf;

	spin_lock(&sk->sk_receive_queue.lock);
	if (po->tp_version == TPACKET_V3)
		has_room = prb_lookup_block(po, &po->rx_ring,
					    po->rx_ring.prb_bdqc.kactive_blk_num,
					    TP_STATUS_KERNEL);
	else
		has_room = packet_lookup_frame(po, &po->rx_ring,
					       po->rx_ring.head,
					       TP_STATUS_KERNEL);
	spin_unlock(&sk->sk_receive_queue.lock);

	return has_room;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

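/* Multiply-shift trick: rxhash * num is a 64-bit product, and taking its
 * top 32 bits maps the 32-bit hash uniformly onto [0, num) without a
 * modulo operation.
 */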
static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return (((u64)skb->rxhash) * num) >> 32;
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return cur;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

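/* Walk the group starting at the remembered position f->next[idx] and
 * return the first socket (other than 'skip') that still has receive
 * room; fall back to 'idx' itself when every member is full.
 */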
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, unsigned int skip,
					  unsigned int num)
{
	unsigned int i, j;

	i = j = min_t(int, f->next[idx], num - 1);
	do {
		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
			if (i != j)
				f->next[idx] = i;
			return i;
		}
		if (++i == num)
			i = 0;
	} while (i != j);

	return idx;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

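/* Entry point for a fanout group: pick one member socket according to
 * f->type and hand the skb to that socket's protocol hook.
 */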
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
		break;
	}

	po = pkt_sk(f->arr[idx]);
	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
	    unlikely(!packet_rcv_has_room(po, skb))) {
		idx = fanout_demux_rollover(f, skb, idx, idx, num);
		po = pkt_sk(f->arr[idx]);
	}

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

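/* Illustrative user-space sketch (not part of this file): a socket joins a
 * fanout group by packing the group id into the low 16 bits and the mode
 * into the high 16 bits of the PACKET_FANOUT option value:
 *
 *	int val = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * which arrives here as fanout_add(sk, id = group_id,
 * type_flags = PACKET_FANOUT_HASH).
 */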
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	po->fanout = NULL;

	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

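/* Run the socket's attached BPF filter, if any. The return value is the
 * number of bytes to snapshot: 0 means the filter dropped the packet,
 * and packet_rcv() below truncates snaplen to whatever is returned.
 */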
static unsigned int run_filter(const struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}
1542 | ||
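/*
 * Editor's note: a hedged userspace sketch (not part of af_packet.c)
 * of the attach side of run_filter() above. SO_ATTACH_FILTER and the
 * classic-BPF structures are the real API; the one-instruction
 * program (snap every packet to 96 bytes) is purely illustrative.
 */
#include <sys/socket.h>
#include <linux/filter.h>

static int attach_snap_filter(int fd)
{
	/* classic BPF: unconditionally return 96, i.e. ask for at most
	 * 96 bytes of every packet; returning 0 would drop it, exactly
	 * the snaplen/res logic run_filter()'s callers implement */
	struct sock_filter code[] = {
		{ BPF_RET | BPF_K, 0, 0, 96 },
	};
	struct sock_fprog prog = {
		.len    = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}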
1543 | /* | |
62ab0812 ED |
1544 | * This function does lazy skb cloning, in the hope that most packets |
1545 | * are discarded by BPF. |
1546 | * | |
1547 | * Note tricky part: we DO mangle shared skb! skb->data, skb->len | |
1548 | * and skb->cb are mangled. It works because (and until) packets | |
1549 | * falling here are owned by current CPU. Output packets are cloned | |
1550 | * by dev_queue_xmit_nit(), input packets are processed by net_bh | |
1551 | * sequentially, so that if we return the skb to its original state |
1552 | * on exit, we will not harm anyone. |
1da177e4 LT |
1553 | */ |
1554 | ||
40d4e3df ED |
1555 | static int packet_rcv(struct sk_buff *skb, struct net_device *dev, |
1556 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1557 | { |
1558 | struct sock *sk; | |
1559 | struct sockaddr_ll *sll; | |
1560 | struct packet_sock *po; | |
40d4e3df | 1561 | u8 *skb_head = skb->data; |
1da177e4 | 1562 | int skb_len = skb->len; |
dbcb5855 | 1563 | unsigned int snaplen, res; |
1da177e4 LT |
1564 | |
1565 | if (skb->pkt_type == PACKET_LOOPBACK) | |
1566 | goto drop; | |
1567 | ||
1568 | sk = pt->af_packet_priv; | |
1569 | po = pkt_sk(sk); | |
1570 | ||
09ad9bc7 | 1571 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1572 | goto drop; |
1573 | ||
1da177e4 LT |
1574 | skb->dev = dev; |
1575 | ||
3b04ddde | 1576 | if (dev->header_ops) { |
1da177e4 | 1577 | /* The device has an explicit notion of ll header, |
62ab0812 ED |
1578 | * exported to higher levels. |
1579 | * | |
1580 | * Otherwise, the device hides details of its frame | |
1581 | * structure, so that the corresponding packet head is |
1582 | * never delivered to the user. |
1da177e4 LT |
1583 | */ |
1584 | if (sk->sk_type != SOCK_DGRAM) | |
98e399f8 | 1585 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1586 | else if (skb->pkt_type == PACKET_OUTGOING) { |
1587 | /* Special case: outgoing packets have ll header at head */ | |
bbe735e4 | 1588 | skb_pull(skb, skb_network_offset(skb)); |
1da177e4 LT |
1589 | } |
1590 | } | |
1591 | ||
1592 | snaplen = skb->len; | |
1593 | ||
dbcb5855 DM |
1594 | res = run_filter(skb, sk, snaplen); |
1595 | if (!res) | |
fda9ef5d | 1596 | goto drop_n_restore; |
dbcb5855 DM |
1597 | if (snaplen > res) |
1598 | snaplen = res; | |
1da177e4 | 1599 | |
0fd7bac6 | 1600 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) |
1da177e4 LT |
1601 | goto drop_n_acct; |
1602 | ||
1603 | if (skb_shared(skb)) { | |
1604 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | |
1605 | if (nskb == NULL) | |
1606 | goto drop_n_acct; | |
1607 | ||
1608 | if (skb_head != skb->data) { | |
1609 | skb->data = skb_head; | |
1610 | skb->len = skb_len; | |
1611 | } | |
abc4e4fa | 1612 | consume_skb(skb); |
1da177e4 LT |
1613 | skb = nskb; |
1614 | } | |
1615 | ||
ffbc6111 HX |
1616 | BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 > |
1617 | sizeof(skb->cb)); | |
1618 | ||
1619 | sll = &PACKET_SKB_CB(skb)->sa.ll; | |
1da177e4 LT |
1620 | sll->sll_family = AF_PACKET; |
1621 | sll->sll_hatype = dev->type; | |
1622 | sll->sll_protocol = skb->protocol; | |
1623 | sll->sll_pkttype = skb->pkt_type; | |
8032b464 | 1624 | if (unlikely(po->origdev)) |
80feaacb PWJ |
1625 | sll->sll_ifindex = orig_dev->ifindex; |
1626 | else | |
1627 | sll->sll_ifindex = dev->ifindex; | |
1da177e4 | 1628 | |
b95cce35 | 1629 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
1da177e4 | 1630 | |
ffbc6111 | 1631 | PACKET_SKB_CB(skb)->origlen = skb->len; |
8dc41944 | 1632 | |
1da177e4 LT |
1633 | if (pskb_trim(skb, snaplen)) |
1634 | goto drop_n_acct; | |
1635 | ||
1636 | skb_set_owner_r(skb, sk); | |
1637 | skb->dev = NULL; | |
adf30907 | 1638 | skb_dst_drop(skb); |
1da177e4 | 1639 | |
84531c24 PO |
1640 | /* drop conntrack reference */ |
1641 | nf_reset(skb); | |
1642 | ||
1da177e4 LT |
1643 | spin_lock(&sk->sk_receive_queue.lock); |
1644 | po->stats.tp_packets++; | |
3b885787 | 1645 | skb->dropcount = atomic_read(&sk->sk_drops); |
1da177e4 LT |
1646 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
1647 | spin_unlock(&sk->sk_receive_queue.lock); | |
1648 | sk->sk_data_ready(sk, skb->len); | |
1649 | return 0; | |
1650 | ||
1651 | drop_n_acct: | |
7091fbd8 WB |
1652 | spin_lock(&sk->sk_receive_queue.lock); |
1653 | po->stats.tp_drops++; | |
1654 | atomic_inc(&sk->sk_drops); | |
1655 | spin_unlock(&sk->sk_receive_queue.lock); | |
1da177e4 LT |
1656 | |
1657 | drop_n_restore: | |
1658 | if (skb_head != skb->data && skb_shared(skb)) { | |
1659 | skb->data = skb_head; | |
1660 | skb->len = skb_len; | |
1661 | } | |
1662 | drop: | |
ead2ceb0 | 1663 | consume_skb(skb); |
1da177e4 LT |
1664 | return 0; |
1665 | } | |
1666 | ||
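/*
 * Editor's note: a hedged sketch of the userspace consumer of
 * packet_rcv() above; each skb queued on sk_receive_queue comes back
 * from recvfrom() together with the sockaddr_ll that packet_rcv()
 * prepared in the skb's control buffer.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>

static void rx_loop(int fd)
{
	unsigned char buf[2048];
	struct sockaddr_ll sll;
	socklen_t slen;
	ssize_t n;

	for (;;) {
		slen = sizeof(sll);
		n = recvfrom(fd, buf, sizeof(buf), 0,
			     (struct sockaddr *)&sll, &slen);
		if (n < 0)
			break;
		printf("ifindex %d proto 0x%04x pkttype %d len %zd\n",
		       sll.sll_ifindex, ntohs(sll.sll_protocol),
		       sll.sll_pkttype, n);
	}
}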
40d4e3df ED |
1667 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
1668 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1669 | { |
1670 | struct sock *sk; | |
1671 | struct packet_sock *po; | |
1672 | struct sockaddr_ll *sll; | |
bbd6ef87 PM |
1673 | union { |
1674 | struct tpacket_hdr *h1; | |
1675 | struct tpacket2_hdr *h2; | |
f6fb8f10 | 1676 | struct tpacket3_hdr *h3; |
bbd6ef87 PM |
1677 | void *raw; |
1678 | } h; | |
40d4e3df | 1679 | u8 *skb_head = skb->data; |
1da177e4 | 1680 | int skb_len = skb->len; |
dbcb5855 | 1681 | unsigned int snaplen, res; |
f6fb8f10 | 1682 | unsigned long status = TP_STATUS_USER; |
bbd6ef87 | 1683 | unsigned short macoff, netoff, hdrlen; |
1da177e4 | 1684 | struct sk_buff *copy_skb = NULL; |
b7aa0bf7 | 1685 | struct timeval tv; |
bbd6ef87 | 1686 | struct timespec ts; |
614f60fa | 1687 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); |
1da177e4 LT |
1688 | |
1689 | if (skb->pkt_type == PACKET_LOOPBACK) | |
1690 | goto drop; | |
1691 | ||
1692 | sk = pt->af_packet_priv; | |
1693 | po = pkt_sk(sk); | |
1694 | ||
09ad9bc7 | 1695 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1696 | goto drop; |
1697 | ||
3b04ddde | 1698 | if (dev->header_ops) { |
1da177e4 | 1699 | if (sk->sk_type != SOCK_DGRAM) |
98e399f8 | 1700 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1701 | else if (skb->pkt_type == PACKET_OUTGOING) { |
1702 | /* Special case: outgoing packets have ll header at head */ | |
bbe735e4 | 1703 | skb_pull(skb, skb_network_offset(skb)); |
1da177e4 LT |
1704 | } |
1705 | } | |
1706 | ||
8dc41944 HX |
1707 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
1708 | status |= TP_STATUS_CSUMNOTREADY; | |
1709 | ||
1da177e4 LT |
1710 | snaplen = skb->len; |
1711 | ||
dbcb5855 DM |
1712 | res = run_filter(skb, sk, snaplen); |
1713 | if (!res) | |
fda9ef5d | 1714 | goto drop_n_restore; |
dbcb5855 DM |
1715 | if (snaplen > res) |
1716 | snaplen = res; | |
1da177e4 LT |
1717 | |
1718 | if (sk->sk_type == SOCK_DGRAM) { | |
8913336a PM |
1719 | macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + |
1720 | po->tp_reserve; | |
1da177e4 | 1721 | } else { |
95c96174 | 1722 | unsigned int maclen = skb_network_offset(skb); |
bbd6ef87 | 1723 | netoff = TPACKET_ALIGN(po->tp_hdrlen + |
8913336a PM |
1724 | (maclen < 16 ? 16 : maclen)) + |
1725 | po->tp_reserve; | |
1da177e4 LT |
1726 | macoff = netoff - maclen; |
1727 | } | |
f6fb8f10 | 1728 | if (po->tp_version <= TPACKET_V2) { |
1729 | if (macoff + snaplen > po->rx_ring.frame_size) { | |
1730 | if (po->copy_thresh && | |
0fd7bac6 | 1731 | atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { |
f6fb8f10 | 1732 | if (skb_shared(skb)) { |
1733 | copy_skb = skb_clone(skb, GFP_ATOMIC); | |
1734 | } else { | |
1735 | copy_skb = skb_get(skb); | |
1736 | skb_head = skb->data; | |
1737 | } | |
1738 | if (copy_skb) | |
1739 | skb_set_owner_r(copy_skb, sk); | |
1da177e4 | 1740 | } |
f6fb8f10 | 1741 | snaplen = po->rx_ring.frame_size - macoff; |
1742 | if ((int)snaplen < 0) | |
1743 | snaplen = 0; | |
1da177e4 | 1744 | } |
1da177e4 | 1745 | } |
1da177e4 | 1746 | spin_lock(&sk->sk_receive_queue.lock); |
f6fb8f10 | 1747 | h.raw = packet_current_rx_frame(po, skb, |
1748 | TP_STATUS_KERNEL, (macoff+snaplen)); | |
bbd6ef87 | 1749 | if (!h.raw) |
1da177e4 | 1750 | goto ring_is_full; |
f6fb8f10 | 1751 | if (po->tp_version <= TPACKET_V2) { |
1752 | packet_increment_rx_head(po, &po->rx_ring); | |
1753 | /* | |
1754 | * LOSING will be reported until you read the stats, |
1755 | * because it's COR - Clear On Read. |
1756 | * Anyway, move it for V1/V2 only, as V3 doesn't need this |
1757 | * at the packet level. |
1758 | */ | |
1759 | if (po->stats.tp_drops) | |
1760 | status |= TP_STATUS_LOSING; | |
1761 | } | |
1da177e4 LT |
1762 | po->stats.tp_packets++; |
1763 | if (copy_skb) { | |
1764 | status |= TP_STATUS_COPY; | |
1765 | __skb_queue_tail(&sk->sk_receive_queue, copy_skb); | |
1766 | } | |
1da177e4 LT |
1767 | spin_unlock(&sk->sk_receive_queue.lock); |
1768 | ||
bbd6ef87 | 1769 | skb_copy_bits(skb, 0, h.raw + macoff, snaplen); |
1da177e4 | 1770 | |
bbd6ef87 PM |
1771 | switch (po->tp_version) { |
1772 | case TPACKET_V1: | |
1773 | h.h1->tp_len = skb->len; | |
1774 | h.h1->tp_snaplen = snaplen; | |
1775 | h.h1->tp_mac = macoff; | |
1776 | h.h1->tp_net = netoff; | |
614f60fa SM |
1777 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) |
1778 | && shhwtstamps->syststamp.tv64) | |
1779 | tv = ktime_to_timeval(shhwtstamps->syststamp); | |
1780 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | |
1781 | && shhwtstamps->hwtstamp.tv64) | |
1782 | tv = ktime_to_timeval(shhwtstamps->hwtstamp); | |
1783 | else if (skb->tstamp.tv64) | |
bbd6ef87 PM |
1784 | tv = ktime_to_timeval(skb->tstamp); |
1785 | else | |
1786 | do_gettimeofday(&tv); | |
1787 | h.h1->tp_sec = tv.tv_sec; | |
1788 | h.h1->tp_usec = tv.tv_usec; | |
1789 | hdrlen = sizeof(*h.h1); | |
1790 | break; | |
1791 | case TPACKET_V2: | |
1792 | h.h2->tp_len = skb->len; | |
1793 | h.h2->tp_snaplen = snaplen; | |
1794 | h.h2->tp_mac = macoff; | |
1795 | h.h2->tp_net = netoff; | |
614f60fa SM |
1796 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) |
1797 | && shhwtstamps->syststamp.tv64) | |
1798 | ts = ktime_to_timespec(shhwtstamps->syststamp); | |
1799 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | |
1800 | && shhwtstamps->hwtstamp.tv64) | |
1801 | ts = ktime_to_timespec(shhwtstamps->hwtstamp); | |
1802 | else if (skb->tstamp.tv64) | |
bbd6ef87 PM |
1803 | ts = ktime_to_timespec(skb->tstamp); |
1804 | else | |
1805 | getnstimeofday(&ts); | |
1806 | h.h2->tp_sec = ts.tv_sec; | |
1807 | h.h2->tp_nsec = ts.tv_nsec; | |
a3bcc23e BG |
1808 | if (vlan_tx_tag_present(skb)) { |
1809 | h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); | |
1810 | status |= TP_STATUS_VLAN_VALID; | |
1811 | } else { | |
1812 | h.h2->tp_vlan_tci = 0; | |
1813 | } | |
13fcb7bd | 1814 | h.h2->tp_padding = 0; |
bbd6ef87 PM |
1815 | hdrlen = sizeof(*h.h2); |
1816 | break; | |
f6fb8f10 | 1817 | case TPACKET_V3: |
1818 | /* tp_nxt_offset and vlan are already populated above, |
1819 | * so do NOT clear those fields here. |
1820 | */ | |
1821 | h.h3->tp_status |= status; | |
1822 | h.h3->tp_len = skb->len; | |
1823 | h.h3->tp_snaplen = snaplen; | |
1824 | h.h3->tp_mac = macoff; | |
1825 | h.h3->tp_net = netoff; | |
1826 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) | |
1827 | && shhwtstamps->syststamp.tv64) | |
1828 | ts = ktime_to_timespec(shhwtstamps->syststamp); | |
1829 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | |
1830 | && shhwtstamps->hwtstamp.tv64) | |
1831 | ts = ktime_to_timespec(shhwtstamps->hwtstamp); | |
1832 | else if (skb->tstamp.tv64) | |
1833 | ts = ktime_to_timespec(skb->tstamp); | |
1834 | else | |
1835 | getnstimeofday(&ts); | |
1836 | h.h3->tp_sec = ts.tv_sec; | |
1837 | h.h3->tp_nsec = ts.tv_nsec; | |
1838 | hdrlen = sizeof(*h.h3); | |
1839 | break; | |
bbd6ef87 PM |
1840 | default: |
1841 | BUG(); | |
1842 | } | |
1da177e4 | 1843 | |
bbd6ef87 | 1844 | sll = h.raw + TPACKET_ALIGN(hdrlen); |
b95cce35 | 1845 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
1da177e4 LT |
1846 | sll->sll_family = AF_PACKET; |
1847 | sll->sll_hatype = dev->type; | |
1848 | sll->sll_protocol = skb->protocol; | |
1849 | sll->sll_pkttype = skb->pkt_type; | |
8032b464 | 1850 | if (unlikely(po->origdev)) |
80feaacb PWJ |
1851 | sll->sll_ifindex = orig_dev->ifindex; |
1852 | else | |
1853 | sll->sll_ifindex = dev->ifindex; | |
1da177e4 | 1854 | |
e16aa207 | 1855 | smp_mb(); |
f6dafa95 | 1856 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 |
1da177e4 | 1857 | { |
0af55bb5 CG |
1858 | u8 *start, *end; |
1859 | ||
f6fb8f10 | 1860 | if (po->tp_version <= TPACKET_V2) { |
1861 | end = (u8 *)PAGE_ALIGN((unsigned long)h.raw | |
1862 | + macoff + snaplen); | |
1863 | for (start = h.raw; start < end; start += PAGE_SIZE) | |
1864 | flush_dcache_page(pgv_to_page(start)); | |
1865 | } | |
cc9f01b2 | 1866 | smp_wmb(); |
1da177e4 | 1867 | } |
f6dafa95 | 1868 | #endif |
f6fb8f10 | 1869 | if (po->tp_version <= TPACKET_V2) |
1870 | __packet_set_status(po, h.raw, status); | |
1871 | else | |
1872 | prb_clear_blk_fill_status(&po->rx_ring); | |
1da177e4 LT |
1873 | |
1874 | sk->sk_data_ready(sk, 0); | |
1875 | ||
1876 | drop_n_restore: | |
1877 | if (skb_head != skb->data && skb_shared(skb)) { | |
1878 | skb->data = skb_head; | |
1879 | skb->len = skb_len; | |
1880 | } | |
1881 | drop: | |
1ce4f28b | 1882 | kfree_skb(skb); |
1da177e4 LT |
1883 | return 0; |
1884 | ||
1885 | ring_is_full: | |
1886 | po->stats.tp_drops++; | |
1887 | spin_unlock(&sk->sk_receive_queue.lock); | |
1888 | ||
1889 | sk->sk_data_ready(sk, 0); | |
acb5d75b | 1890 | kfree_skb(copy_skb); |
1da177e4 LT |
1891 | goto drop_n_restore; |
1892 | } | |
1893 | ||
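/*
 * Editor's note: a hedged sketch of a TPACKET_V2 ring consumer for the
 * tpacket_rcv() path above. The ring geometry is illustrative, error
 * handling is trimmed, and production code would also need the memory
 * barriers that pair with the smp_mb()/smp_wmb() calls in the kernel.
 */
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

#define BLOCK_SIZE 4096
#define BLOCK_NR   64
#define FRAME_SIZE 2048
#define FRAME_NR   ((BLOCK_SIZE / FRAME_SIZE) * BLOCK_NR)

static void rx_ring_loop(int fd)
{
	int ver = TPACKET_V2;
	struct tpacket_req req = {
		.tp_block_size = BLOCK_SIZE,
		.tp_block_nr   = BLOCK_NR,
		.tp_frame_size = FRAME_SIZE,
		.tp_frame_nr   = FRAME_NR,
	};
	unsigned char *ring;
	unsigned int i = 0;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	ring = mmap(NULL, (size_t)BLOCK_SIZE * BLOCK_NR,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (;;) {
		struct tpacket2_hdr *hdr =
			(struct tpacket2_hdr *)(ring + i * FRAME_SIZE);

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			struct pollfd pfd = { .fd = fd, .events = POLLIN };
			poll(&pfd, 1, -1);
			continue;
		}
		/* data lives tp_mac bytes into the frame, which is the
		 * macoff that tpacket_rcv() computed before copying */
		/* ... consume (unsigned char *)hdr + hdr->tp_mac for
		 *     hdr->tp_snaplen bytes ... */
		hdr->tp_status = TP_STATUS_KERNEL; /* hand frame back */
		i = (i + 1) % FRAME_NR;
	}
}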
69e3c75f JB |
1894 | static void tpacket_destruct_skb(struct sk_buff *skb) |
1895 | { | |
1896 | struct packet_sock *po = pkt_sk(skb->sk); | |
40d4e3df | 1897 | void *ph; |
1da177e4 | 1898 | |
69e3c75f JB |
1899 | if (likely(po->tx_ring.pg_vec)) { |
1900 | ph = skb_shinfo(skb)->destructor_arg; | |
69e3c75f JB |
1901 | BUG_ON(atomic_read(&po->tx_ring.pending) == 0); |
1902 | atomic_dec(&po->tx_ring.pending); | |
1903 | __packet_set_status(po, ph, TP_STATUS_AVAILABLE); | |
1904 | } | |
1905 | ||
1906 | sock_wfree(skb); | |
1907 | } | |
1908 | ||
40d4e3df ED |
1909 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, |
1910 | void *frame, struct net_device *dev, int size_max, | |
ae641949 | 1911 | __be16 proto, unsigned char *addr, int hlen) |
69e3c75f JB |
1912 | { |
1913 | union { | |
1914 | struct tpacket_hdr *h1; | |
1915 | struct tpacket2_hdr *h2; | |
1916 | void *raw; | |
1917 | } ph; | |
1918 | int to_write, offset, len, tp_len, nr_frags, len_max; | |
1919 | struct socket *sock = po->sk.sk_socket; | |
1920 | struct page *page; | |
1921 | void *data; | |
1922 | int err; | |
1923 | ||
1924 | ph.raw = frame; | |
1925 | ||
1926 | skb->protocol = proto; | |
1927 | skb->dev = dev; | |
1928 | skb->priority = po->sk.sk_priority; | |
2d37a186 | 1929 | skb->mark = po->sk.sk_mark; |
69e3c75f JB |
1930 | skb_shinfo(skb)->destructor_arg = ph.raw; |
1931 | ||
1932 | switch (po->tp_version) { | |
1933 | case TPACKET_V2: | |
1934 | tp_len = ph.h2->tp_len; | |
1935 | break; | |
1936 | default: | |
1937 | tp_len = ph.h1->tp_len; | |
1938 | break; | |
1939 | } | |
1940 | if (unlikely(tp_len > size_max)) { | |
40d4e3df | 1941 | pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); |
69e3c75f JB |
1942 | return -EMSGSIZE; |
1943 | } | |
1944 | ||
ae641949 | 1945 | skb_reserve(skb, hlen); |
69e3c75f | 1946 | skb_reset_network_header(skb); |
40893fd0 | 1947 | skb_probe_transport_header(skb, 0); |
c1aad275 | 1948 | |
5920cd3a PC |
1949 | if (po->tp_tx_has_off) { |
1950 | int off_min, off_max, off; | |
1951 | off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); | |
1952 | off_max = po->tx_ring.frame_size - tp_len; | |
1953 | if (sock->type == SOCK_DGRAM) { | |
1954 | switch (po->tp_version) { | |
1955 | case TPACKET_V2: | |
1956 | off = ph.h2->tp_net; | |
1957 | break; | |
1958 | default: | |
1959 | off = ph.h1->tp_net; | |
1960 | break; | |
1961 | } | |
1962 | } else { | |
1963 | switch (po->tp_version) { | |
1964 | case TPACKET_V2: | |
1965 | off = ph.h2->tp_mac; | |
1966 | break; | |
1967 | default: | |
1968 | off = ph.h1->tp_mac; | |
1969 | break; | |
1970 | } | |
1971 | } | |
1972 | if (unlikely((off < off_min) || (off_max < off))) | |
1973 | return -EINVAL; | |
1974 | data = ph.raw + off; | |
1975 | } else { | |
1976 | data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); | |
1977 | } | |
69e3c75f JB |
1978 | to_write = tp_len; |
1979 | ||
1980 | if (sock->type == SOCK_DGRAM) { | |
1981 | err = dev_hard_header(skb, dev, ntohs(proto), addr, | |
1982 | NULL, tp_len); | |
1983 | if (unlikely(err < 0)) | |
1984 | return -EINVAL; | |
40d4e3df | 1985 | } else if (dev->hard_header_len) { |
69e3c75f JB |
1986 | /* net device doesn't like empty head */ |
1987 | if (unlikely(tp_len <= dev->hard_header_len)) { | |
40d4e3df ED |
1988 | pr_err("packet size is too short (%d < %d)\n", |
1989 | tp_len, dev->hard_header_len); | |
69e3c75f JB |
1990 | return -EINVAL; |
1991 | } | |
1992 | ||
1993 | skb_push(skb, dev->hard_header_len); | |
1994 | err = skb_store_bits(skb, 0, data, | |
1995 | dev->hard_header_len); | |
1996 | if (unlikely(err)) | |
1997 | return err; | |
1998 | ||
1999 | data += dev->hard_header_len; | |
2000 | to_write -= dev->hard_header_len; | |
2001 | } | |
2002 | ||
69e3c75f JB |
2003 | offset = offset_in_page(data); |
2004 | len_max = PAGE_SIZE - offset; | |
2005 | len = ((to_write > len_max) ? len_max : to_write); | |
2006 | ||
2007 | skb->data_len = to_write; | |
2008 | skb->len += to_write; | |
2009 | skb->truesize += to_write; | |
2010 | atomic_add(to_write, &po->sk.sk_wmem_alloc); | |
2011 | ||
2012 | while (likely(to_write)) { | |
2013 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2014 | ||
2015 | if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { | |
40d4e3df ED |
2016 | pr_err("Packet exceeds the number of skb frags (%lu)\n", |
2017 | MAX_SKB_FRAGS); | |
69e3c75f JB |
2018 | return -EFAULT; |
2019 | } | |
2020 | ||
0af55bb5 CG |
2021 | page = pgv_to_page(data); |
2022 | data += len; | |
69e3c75f JB |
2023 | flush_dcache_page(page); |
2024 | get_page(page); | |
0af55bb5 | 2025 | skb_fill_page_desc(skb, nr_frags, page, offset, len); |
69e3c75f JB |
2026 | to_write -= len; |
2027 | offset = 0; | |
2028 | len_max = PAGE_SIZE; | |
2029 | len = ((to_write > len_max) ? len_max : to_write); | |
2030 | } | |
2031 | ||
2032 | return tp_len; | |
2033 | } | |
2034 | ||
2035 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |
2036 | { | |
69e3c75f JB |
2037 | struct sk_buff *skb; |
2038 | struct net_device *dev; | |
2039 | __be16 proto; | |
827d9780 BG |
2040 | bool need_rls_dev = false; |
2041 | int err, reserve = 0; | |
40d4e3df ED |
2042 | void *ph; |
2043 | struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; | |
69e3c75f JB |
2044 | int tp_len, size_max; |
2045 | unsigned char *addr; | |
2046 | int len_sum = 0; | |
9e67030a | 2047 | int status = TP_STATUS_AVAILABLE; |
ae641949 | 2048 | int hlen, tlen; |
69e3c75f | 2049 | |
69e3c75f JB |
2050 | mutex_lock(&po->pg_vec_lock); |
2051 | ||
69e3c75f | 2052 | if (saddr == NULL) { |
827d9780 | 2053 | dev = po->prot_hook.dev; |
69e3c75f JB |
2054 | proto = po->num; |
2055 | addr = NULL; | |
2056 | } else { | |
2057 | err = -EINVAL; | |
2058 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | |
2059 | goto out; | |
2060 | if (msg->msg_namelen < (saddr->sll_halen | |
2061 | + offsetof(struct sockaddr_ll, | |
2062 | sll_addr))) | |
2063 | goto out; | |
69e3c75f JB |
2064 | proto = saddr->sll_protocol; |
2065 | addr = saddr->sll_addr; | |
827d9780 BG |
2066 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
2067 | need_rls_dev = true; | |
69e3c75f JB |
2068 | } |
2069 | ||
69e3c75f JB |
2070 | err = -ENXIO; |
2071 | if (unlikely(dev == NULL)) | |
2072 | goto out; | |
2073 | ||
2074 | reserve = dev->hard_header_len; | |
2075 | ||
2076 | err = -ENETDOWN; | |
2077 | if (unlikely(!(dev->flags & IFF_UP))) | |
2078 | goto out_put; | |
2079 | ||
2080 | size_max = po->tx_ring.frame_size | |
b5dd884e | 2081 | - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); |
69e3c75f JB |
2082 | |
2083 | if (size_max > dev->mtu + reserve) | |
2084 | size_max = dev->mtu + reserve; | |
2085 | ||
2086 | do { | |
2087 | ph = packet_current_frame(po, &po->tx_ring, | |
2088 | TP_STATUS_SEND_REQUEST); | |
2089 | ||
2090 | if (unlikely(ph == NULL)) { | |
2091 | schedule(); | |
2092 | continue; | |
2093 | } | |
2094 | ||
2095 | status = TP_STATUS_SEND_REQUEST; | |
ae641949 HX |
2096 | hlen = LL_RESERVED_SPACE(dev); |
2097 | tlen = dev->needed_tailroom; | |
69e3c75f | 2098 | skb = sock_alloc_send_skb(&po->sk, |
ae641949 | 2099 | hlen + tlen + sizeof(struct sockaddr_ll), |
69e3c75f JB |
2100 | 0, &err); |
2101 | ||
2102 | if (unlikely(skb == NULL)) | |
2103 | goto out_status; | |
2104 | ||
2105 | tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, | |
ae641949 | 2106 | addr, hlen); |
69e3c75f JB |
2107 | |
2108 | if (unlikely(tp_len < 0)) { | |
2109 | if (po->tp_loss) { | |
2110 | __packet_set_status(po, ph, | |
2111 | TP_STATUS_AVAILABLE); | |
2112 | packet_increment_head(&po->tx_ring); | |
2113 | kfree_skb(skb); | |
2114 | continue; | |
2115 | } else { | |
2116 | status = TP_STATUS_WRONG_FORMAT; | |
2117 | err = tp_len; | |
2118 | goto out_status; | |
2119 | } | |
2120 | } | |
2121 | ||
2122 | skb->destructor = tpacket_destruct_skb; | |
2123 | __packet_set_status(po, ph, TP_STATUS_SENDING); | |
2124 | atomic_inc(&po->tx_ring.pending); | |
2125 | ||
2126 | status = TP_STATUS_SEND_REQUEST; | |
2127 | err = dev_queue_xmit(skb); | |
eb70df13 JP |
2128 | if (unlikely(err > 0)) { |
2129 | err = net_xmit_errno(err); | |
2130 | if (err && __packet_get_status(po, ph) == | |
2131 | TP_STATUS_AVAILABLE) { | |
2132 | /* skb was destructed already */ | |
2133 | skb = NULL; | |
2134 | goto out_status; | |
2135 | } | |
2136 | /* | |
2137 | * skb was dropped but not destructed yet; | |
2138 | * let's treat it like congestion or err < 0 | |
2139 | */ | |
2140 | err = 0; | |
2141 | } | |
69e3c75f JB |
2142 | packet_increment_head(&po->tx_ring); |
2143 | len_sum += tp_len; | |
f64f9e71 JP |
2144 | } while (likely((ph != NULL) || |
2145 | ((!(msg->msg_flags & MSG_DONTWAIT)) && | |
2146 | (atomic_read(&po->tx_ring.pending)))) | |
2147 | ); | |
69e3c75f JB |
2148 | |
2149 | err = len_sum; | |
2150 | goto out_put; | |
2151 | ||
69e3c75f JB |
2152 | out_status: |
2153 | __packet_set_status(po, ph, status); | |
2154 | kfree_skb(skb); | |
2155 | out_put: | |
827d9780 BG |
2156 | if (need_rls_dev) |
2157 | dev_put(dev); | |
69e3c75f JB |
2158 | out: |
2159 | mutex_unlock(&po->pg_vec_lock); | |
2160 | return err; | |
2161 | } | |
69e3c75f | 2162 | |
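/*
 * Editor's note: a hedged sketch of the userspace producer consumed by
 * tpacket_snd() above. It assumes the socket is already bound and a
 * PACKET_TX_RING of TPACKET_V2 frames is mmap()ed at 'ring'; the data
 * offset matches what tpacket_fill_skb() uses when tp_tx_has_off is
 * not set (tp_hdrlen minus the trailing sockaddr_ll).
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int tx_one_frame(int fd, unsigned char *ring, unsigned int frame,
			unsigned int frame_size, const void *pkt,
			unsigned int len)
{
	struct tpacket2_hdr *hdr =
		(struct tpacket2_hdr *)(ring + frame * frame_size);

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return -1;	/* the kernel still owns this frame */

	memcpy((unsigned char *)hdr + TPACKET2_HDRLEN -
	       sizeof(struct sockaddr_ll), pkt, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	/* kick tpacket_snd(); no buffer is needed for ring transmit */
	return (int)send(fd, NULL, 0, 0);
}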
eea49cc9 OJ |
2163 | static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, |
2164 | size_t reserve, size_t len, | |
2165 | size_t linear, int noblock, | |
2166 | int *err) | |
bfd5f4a3 SS |
2167 | { |
2168 | struct sk_buff *skb; | |
2169 | ||
2170 | /* Under a page? Don't bother with paged skb. */ | |
2171 | if (prepad + len < PAGE_SIZE || !linear) | |
2172 | linear = len; | |
2173 | ||
2174 | skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, | |
2175 | err); | |
2176 | if (!skb) | |
2177 | return NULL; | |
2178 | ||
2179 | skb_reserve(skb, reserve); | |
2180 | skb_put(skb, linear); | |
2181 | skb->data_len = len - linear; | |
2182 | skb->len += len - linear; | |
2183 | ||
2184 | return skb; | |
2185 | } | |
2186 | ||
69e3c75f | 2187 | static int packet_snd(struct socket *sock, |
1da177e4 LT |
2188 | struct msghdr *msg, size_t len) |
2189 | { | |
2190 | struct sock *sk = sock->sk; | |
40d4e3df | 2191 | struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; |
1da177e4 LT |
2192 | struct sk_buff *skb; |
2193 | struct net_device *dev; | |
0e11c91e | 2194 | __be16 proto; |
827d9780 | 2195 | bool need_rls_dev = false; |
1da177e4 | 2196 | unsigned char *addr; |
827d9780 | 2197 | int err, reserve = 0; |
bfd5f4a3 SS |
2198 | struct virtio_net_hdr vnet_hdr = { 0 }; |
2199 | int offset = 0; | |
2200 | int vnet_hdr_len; | |
2201 | struct packet_sock *po = pkt_sk(sk); | |
2202 | unsigned short gso_type = 0; | |
ae641949 | 2203 | int hlen, tlen; |
3bdc0eba | 2204 | int extra_len = 0; |
1da177e4 LT |
2205 | |
2206 | /* | |
1ce4f28b | 2207 | * Get and verify the address. |
1da177e4 | 2208 | */ |
1ce4f28b | 2209 | |
1da177e4 | 2210 | if (saddr == NULL) { |
827d9780 | 2211 | dev = po->prot_hook.dev; |
1da177e4 LT |
2212 | proto = po->num; |
2213 | addr = NULL; | |
2214 | } else { | |
2215 | err = -EINVAL; | |
2216 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | |
2217 | goto out; | |
0fb375fb EB |
2218 | if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) |
2219 | goto out; | |
1da177e4 LT |
2220 | proto = saddr->sll_protocol; |
2221 | addr = saddr->sll_addr; | |
827d9780 BG |
2222 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); |
2223 | need_rls_dev = true; | |
1da177e4 LT |
2224 | } |
2225 | ||
1da177e4 LT |
2226 | err = -ENXIO; |
2227 | if (dev == NULL) | |
2228 | goto out_unlock; | |
2229 | if (sock->type == SOCK_RAW) | |
2230 | reserve = dev->hard_header_len; | |
2231 | ||
d5e76b0a DM |
2232 | err = -ENETDOWN; |
2233 | if (!(dev->flags & IFF_UP)) | |
2234 | goto out_unlock; | |
2235 | ||
bfd5f4a3 SS |
2236 | if (po->has_vnet_hdr) { |
2237 | vnet_hdr_len = sizeof(vnet_hdr); | |
2238 | ||
2239 | err = -EINVAL; | |
2240 | if (len < vnet_hdr_len) | |
2241 | goto out_unlock; | |
2242 | ||
2243 | len -= vnet_hdr_len; | |
2244 | ||
2245 | err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov, | |
2246 | vnet_hdr_len); | |
2247 | if (err < 0) | |
2248 | goto out_unlock; | |
2249 | ||
2250 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | |
2251 | (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 > | |
2252 | vnet_hdr.hdr_len)) | |
2253 | vnet_hdr.hdr_len = vnet_hdr.csum_start + | |
2254 | vnet_hdr.csum_offset + 2; | |
2255 | ||
2256 | err = -EINVAL; | |
2257 | if (vnet_hdr.hdr_len > len) | |
2258 | goto out_unlock; | |
2259 | ||
2260 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | |
2261 | switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { | |
2262 | case VIRTIO_NET_HDR_GSO_TCPV4: | |
2263 | gso_type = SKB_GSO_TCPV4; | |
2264 | break; | |
2265 | case VIRTIO_NET_HDR_GSO_TCPV6: | |
2266 | gso_type = SKB_GSO_TCPV6; | |
2267 | break; | |
2268 | case VIRTIO_NET_HDR_GSO_UDP: | |
2269 | gso_type = SKB_GSO_UDP; | |
2270 | break; | |
2271 | default: | |
2272 | goto out_unlock; | |
2273 | } | |
2274 | ||
2275 | if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) | |
2276 | gso_type |= SKB_GSO_TCP_ECN; | |
2277 | ||
2278 | if (vnet_hdr.gso_size == 0) | |
2279 | goto out_unlock; | |
2280 | ||
2281 | } | |
2282 | } | |
2283 | ||
3bdc0eba BG |
2284 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
2285 | if (!netif_supports_nofcs(dev)) { | |
2286 | err = -EPROTONOSUPPORT; | |
2287 | goto out_unlock; | |
2288 | } | |
2289 | extra_len = 4; /* We're doing our own CRC */ | |
2290 | } | |
2291 | ||
1da177e4 | 2292 | err = -EMSGSIZE; |
3bdc0eba | 2293 | if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) |
1da177e4 LT |
2294 | goto out_unlock; |
2295 | ||
bfd5f4a3 | 2296 | err = -ENOBUFS; |
ae641949 HX |
2297 | hlen = LL_RESERVED_SPACE(dev); |
2298 | tlen = dev->needed_tailroom; | |
2299 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len, | |
bfd5f4a3 | 2300 | msg->msg_flags & MSG_DONTWAIT, &err); |
40d4e3df | 2301 | if (skb == NULL) |
1da177e4 LT |
2302 | goto out_unlock; |
2303 | ||
bfd5f4a3 | 2304 | skb_set_network_header(skb, reserve); |
1da177e4 | 2305 | |
0c4e8581 SH |
2306 | err = -EINVAL; |
2307 | if (sock->type == SOCK_DGRAM && | |
bfd5f4a3 | 2308 | (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0) |
0c4e8581 | 2309 | goto out_free; |
1da177e4 LT |
2310 | |
2311 | /* Returns -EFAULT on error */ | |
bfd5f4a3 | 2312 | err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); |
1da177e4 LT |
2313 | if (err) |
2314 | goto out_free; | |
2244d07b | 2315 | err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); |
ed85b565 RC |
2316 | if (err < 0) |
2317 | goto out_free; | |
1da177e4 | 2318 | |
3bdc0eba | 2319 | if (!gso_type && (len > dev->mtu + reserve + extra_len)) { |
57f89bfa BG |
2320 | /* Earlier code assumed this would be a VLAN pkt, |
2321 | * double-check this now that we have the actual | |
2322 | * packet in hand. | |
2323 | */ | |
2324 | struct ethhdr *ehdr; | |
2325 | skb_reset_mac_header(skb); | |
2326 | ehdr = eth_hdr(skb); | |
2327 | if (ehdr->h_proto != htons(ETH_P_8021Q)) { | |
2328 | err = -EMSGSIZE; | |
2329 | goto out_free; | |
2330 | } | |
2331 | } | |
2332 | ||
1da177e4 LT |
2333 | skb->protocol = proto; |
2334 | skb->dev = dev; | |
2335 | skb->priority = sk->sk_priority; | |
2d37a186 | 2336 | skb->mark = sk->sk_mark; |
1da177e4 | 2337 | |
bfd5f4a3 SS |
2338 | if (po->has_vnet_hdr) { |
2339 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | |
2340 | if (!skb_partial_csum_set(skb, vnet_hdr.csum_start, | |
2341 | vnet_hdr.csum_offset)) { | |
2342 | err = -EINVAL; | |
2343 | goto out_free; | |
2344 | } | |
2345 | } | |
2346 | ||
2347 | skb_shinfo(skb)->gso_size = vnet_hdr.gso_size; | |
2348 | skb_shinfo(skb)->gso_type = gso_type; | |
2349 | ||
2350 | /* Header must be checked, and gso_segs computed. */ | |
2351 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | |
2352 | skb_shinfo(skb)->gso_segs = 0; | |
2353 | ||
2354 | len += vnet_hdr_len; | |
2355 | } | |
2356 | ||
40893fd0 | 2357 | skb_probe_transport_header(skb, reserve); |
c1aad275 | 2358 | |
3bdc0eba BG |
2359 | if (unlikely(extra_len == 4)) |
2360 | skb->no_fcs = 1; | |
2361 | ||
1da177e4 LT |
2362 | /* |
2363 | * Now send it | |
2364 | */ | |
2365 | ||
2366 | err = dev_queue_xmit(skb); | |
2367 | if (err > 0 && (err = net_xmit_errno(err)) != 0) | |
2368 | goto out_unlock; | |
2369 | ||
827d9780 BG |
2370 | if (need_rls_dev) |
2371 | dev_put(dev); | |
1da177e4 | 2372 | |
40d4e3df | 2373 | return len; |
1da177e4 LT |
2374 | |
2375 | out_free: | |
2376 | kfree_skb(skb); | |
2377 | out_unlock: | |
827d9780 | 2378 | if (dev && need_rls_dev) |
1da177e4 LT |
2379 | dev_put(dev); |
2380 | out: | |
2381 | return err; | |
2382 | } | |
2383 | ||
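/*
 * Editor's note: a hedged sketch of the sendto() call that lands in
 * packet_snd() above, with the sockaddr_ll fields it validates and
 * uses. "eth0" and the broadcast destination are illustrative; on a
 * SOCK_DGRAM socket packet_snd() builds the link-layer header from
 * these fields via dev_hard_header().
 */
#include <string.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int send_frame(int fd, const void *payload, size_t len)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_IP);
	sll.sll_ifindex  = if_nametoindex("eth0");
	sll.sll_halen    = ETH_ALEN;
	memset(sll.sll_addr, 0xff, ETH_ALEN);	/* broadcast */

	return (int)sendto(fd, payload, len, 0,
			   (struct sockaddr *)&sll, sizeof(sll));
}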
69e3c75f JB |
2384 | static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, |
2385 | struct msghdr *msg, size_t len) | |
2386 | { | |
69e3c75f JB |
2387 | struct sock *sk = sock->sk; |
2388 | struct packet_sock *po = pkt_sk(sk); | |
2389 | if (po->tx_ring.pg_vec) | |
2390 | return tpacket_snd(po, msg); | |
2391 | else | |
69e3c75f JB |
2392 | return packet_snd(sock, msg, len); |
2393 | } | |
2394 | ||
1da177e4 LT |
2395 | /* |
2396 | * Close a PACKET socket. This is fairly simple. We immediately go | |
2397 | * to 'closed' state and remove our protocol entry in the device list. | |
2398 | */ | |
2399 | ||
2400 | static int packet_release(struct socket *sock) | |
2401 | { | |
2402 | struct sock *sk = sock->sk; | |
2403 | struct packet_sock *po; | |
d12d01d6 | 2404 | struct net *net; |
f6fb8f10 | 2405 | union tpacket_req_u req_u; |
1da177e4 LT |
2406 | |
2407 | if (!sk) | |
2408 | return 0; | |
2409 | ||
3b1e0a65 | 2410 | net = sock_net(sk); |
1da177e4 LT |
2411 | po = pkt_sk(sk); |
2412 | ||
0fa7fa98 | 2413 | mutex_lock(&net->packet.sklist_lock); |
808f5114 | 2414 | sk_del_node_init_rcu(sk); |
0fa7fa98 PE |
2415 | mutex_unlock(&net->packet.sklist_lock); |
2416 | ||
2417 | preempt_disable(); | |
920de804 | 2418 | sock_prot_inuse_add(net, sk->sk_prot, -1); |
0fa7fa98 | 2419 | preempt_enable(); |
1da177e4 | 2420 | |
808f5114 | 2421 | spin_lock(&po->bind_lock); |
ce06b03e | 2422 | unregister_prot_hook(sk, false); |
160ff18a BG |
2423 | if (po->prot_hook.dev) { |
2424 | dev_put(po->prot_hook.dev); | |
2425 | po->prot_hook.dev = NULL; | |
2426 | } | |
808f5114 | 2427 | spin_unlock(&po->bind_lock); |
1da177e4 | 2428 | |
1da177e4 | 2429 | packet_flush_mclist(sk); |
1da177e4 | 2430 | |
9665d5d6 PS |
2431 | if (po->rx_ring.pg_vec) { |
2432 | memset(&req_u, 0, sizeof(req_u)); | |
f6fb8f10 | 2433 | packet_set_ring(sk, &req_u, 1, 0); |
9665d5d6 | 2434 | } |
69e3c75f | 2435 | |
9665d5d6 PS |
2436 | if (po->tx_ring.pg_vec) { |
2437 | memset(&req_u, 0, sizeof(req_u)); | |
f6fb8f10 | 2438 | packet_set_ring(sk, &req_u, 1, 1); |
9665d5d6 | 2439 | } |
1da177e4 | 2440 | |
dc99f600 DM |
2441 | fanout_release(sk); |
2442 | ||
808f5114 | 2443 | synchronize_net(); |
1da177e4 LT |
2444 | /* |
2445 | * Now the socket is dead. No more input will appear. | |
2446 | */ | |
1da177e4 LT |
2447 | sock_orphan(sk); |
2448 | sock->sk = NULL; | |
2449 | ||
2450 | /* Purge queues */ | |
2451 | ||
2452 | skb_queue_purge(&sk->sk_receive_queue); | |
17ab56a2 | 2453 | sk_refcnt_debug_release(sk); |
1da177e4 LT |
2454 | |
2455 | sock_put(sk); | |
2456 | return 0; | |
2457 | } | |
2458 | ||
2459 | /* | |
2460 | * Attach a packet hook. | |
2461 | */ | |
2462 | ||
0e11c91e | 2463 | static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol) |
1da177e4 LT |
2464 | { |
2465 | struct packet_sock *po = pkt_sk(sk); | |
dc99f600 | 2466 | |
aef950b4 WY |
2467 | if (po->fanout) { |
2468 | if (dev) | |
2469 | dev_put(dev); | |
2470 | ||
dc99f600 | 2471 | return -EINVAL; |
aef950b4 | 2472 | } |
1da177e4 LT |
2473 | |
2474 | lock_sock(sk); | |
2475 | ||
2476 | spin_lock(&po->bind_lock); | |
ce06b03e | 2477 | unregister_prot_hook(sk, true); |
1da177e4 LT |
2478 | po->num = protocol; |
2479 | po->prot_hook.type = protocol; | |
160ff18a BG |
2480 | if (po->prot_hook.dev) |
2481 | dev_put(po->prot_hook.dev); | |
1da177e4 LT |
2482 | po->prot_hook.dev = dev; |
2483 | ||
2484 | po->ifindex = dev ? dev->ifindex : 0; | |
2485 | ||
2486 | if (protocol == 0) | |
2487 | goto out_unlock; | |
2488 | ||
be85d4ad | 2489 | if (!dev || (dev->flags & IFF_UP)) { |
ce06b03e | 2490 | register_prot_hook(sk); |
be85d4ad UT |
2491 | } else { |
2492 | sk->sk_err = ENETDOWN; | |
2493 | if (!sock_flag(sk, SOCK_DEAD)) | |
2494 | sk->sk_error_report(sk); | |
1da177e4 LT |
2495 | } |
2496 | ||
2497 | out_unlock: | |
2498 | spin_unlock(&po->bind_lock); | |
2499 | release_sock(sk); | |
2500 | return 0; | |
2501 | } | |
2502 | ||
2503 | /* | |
2504 | * Bind a packet socket to a device | |
2505 | */ | |
2506 | ||
40d4e3df ED |
2507 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
2508 | int addr_len) | |
1da177e4 | 2509 | { |
40d4e3df | 2510 | struct sock *sk = sock->sk; |
1da177e4 LT |
2511 | char name[15]; |
2512 | struct net_device *dev; | |
2513 | int err = -ENODEV; | |
1ce4f28b | 2514 | |
1da177e4 LT |
2515 | /* |
2516 | * Check legality | |
2517 | */ | |
1ce4f28b | 2518 | |
8ae55f04 | 2519 | if (addr_len != sizeof(struct sockaddr)) |
1da177e4 | 2520 | return -EINVAL; |
40d4e3df | 2521 | strlcpy(name, uaddr->sa_data, sizeof(name)); |
1da177e4 | 2522 | |
3b1e0a65 | 2523 | dev = dev_get_by_name(sock_net(sk), name); |
160ff18a | 2524 | if (dev) |
1da177e4 | 2525 | err = packet_do_bind(sk, dev, pkt_sk(sk)->num); |
1da177e4 LT |
2526 | return err; |
2527 | } | |
1da177e4 LT |
2528 | |
2529 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |
2530 | { | |
40d4e3df ED |
2531 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; |
2532 | struct sock *sk = sock->sk; | |
1da177e4 LT |
2533 | struct net_device *dev = NULL; |
2534 | int err; | |
2535 | ||
2536 | ||
2537 | /* | |
2538 | * Check legality | |
2539 | */ | |
1ce4f28b | 2540 | |
1da177e4 LT |
2541 | if (addr_len < sizeof(struct sockaddr_ll)) |
2542 | return -EINVAL; | |
2543 | if (sll->sll_family != AF_PACKET) | |
2544 | return -EINVAL; | |
2545 | ||
2546 | if (sll->sll_ifindex) { | |
2547 | err = -ENODEV; | |
3b1e0a65 | 2548 | dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex); |
1da177e4 LT |
2549 | if (dev == NULL) |
2550 | goto out; | |
2551 | } | |
2552 | err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num); | |
1da177e4 LT |
2553 | |
2554 | out: | |
2555 | return err; | |
2556 | } | |
2557 | ||
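/*
 * Editor's note: a hedged sketch of the bind() call serviced by
 * packet_bind() above. The interface name is illustrative; a zero
 * sll_protocol would keep the socket's current protocol, which is
 * what the 'sll_protocol ? :' fallback above implements.
 */
#include <string.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int bind_to_dev(int fd, const char *ifname)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex  = if_nametoindex(ifname);

	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}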
2558 | static struct proto packet_proto = { | |
2559 | .name = "PACKET", | |
2560 | .owner = THIS_MODULE, | |
2561 | .obj_size = sizeof(struct packet_sock), | |
2562 | }; | |
2563 | ||
2564 | /* | |
1ce4f28b | 2565 | * Create a packet socket (SOCK_PACKET, SOCK_RAW or SOCK_DGRAM). |
1da177e4 LT |
2566 | */ |
2567 | ||
3f378b68 EP |
2568 | static int packet_create(struct net *net, struct socket *sock, int protocol, |
2569 | int kern) | |
1da177e4 LT |
2570 | { |
2571 | struct sock *sk; | |
2572 | struct packet_sock *po; | |
0e11c91e | 2573 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ |
1da177e4 LT |
2574 | int err; |
2575 | ||
df008c91 | 2576 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) |
1da177e4 | 2577 | return -EPERM; |
be02097c DM |
2578 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && |
2579 | sock->type != SOCK_PACKET) | |
1da177e4 LT |
2580 | return -ESOCKTNOSUPPORT; |
2581 | ||
2582 | sock->state = SS_UNCONNECTED; | |
2583 | ||
2584 | err = -ENOBUFS; | |
6257ff21 | 2585 | sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto); |
1da177e4 LT |
2586 | if (sk == NULL) |
2587 | goto out; | |
2588 | ||
2589 | sock->ops = &packet_ops; | |
1da177e4 LT |
2590 | if (sock->type == SOCK_PACKET) |
2591 | sock->ops = &packet_ops_spkt; | |
be02097c | 2592 | |
1da177e4 LT |
2593 | sock_init_data(sock, sk); |
2594 | ||
2595 | po = pkt_sk(sk); | |
2596 | sk->sk_family = PF_PACKET; | |
0e11c91e | 2597 | po->num = proto; |
1da177e4 LT |
2598 | |
2599 | sk->sk_destruct = packet_sock_destruct; | |
17ab56a2 | 2600 | sk_refcnt_debug_inc(sk); |
1da177e4 LT |
2601 | |
2602 | /* | |
2603 | * Attach a protocol block | |
2604 | */ | |
2605 | ||
2606 | spin_lock_init(&po->bind_lock); | |
905db440 | 2607 | mutex_init(&po->pg_vec_lock); |
1da177e4 | 2608 | po->prot_hook.func = packet_rcv; |
be02097c | 2609 | |
1da177e4 LT |
2610 | if (sock->type == SOCK_PACKET) |
2611 | po->prot_hook.func = packet_rcv_spkt; | |
be02097c | 2612 | |
1da177e4 LT |
2613 | po->prot_hook.af_packet_priv = sk; |
2614 | ||
0e11c91e AV |
2615 | if (proto) { |
2616 | po->prot_hook.type = proto; | |
ce06b03e | 2617 | register_prot_hook(sk); |
1da177e4 LT |
2618 | } |
2619 | ||
0fa7fa98 | 2620 | mutex_lock(&net->packet.sklist_lock); |
808f5114 | 2621 | sk_add_node_rcu(sk, &net->packet.sklist); |
0fa7fa98 PE |
2622 | mutex_unlock(&net->packet.sklist_lock); |
2623 | ||
2624 | preempt_disable(); | |
3680453c | 2625 | sock_prot_inuse_add(net, &packet_proto, 1); |
0fa7fa98 | 2626 | preempt_enable(); |
808f5114 | 2627 | |
40d4e3df | 2628 | return 0; |
1da177e4 LT |
2629 | out: |
2630 | return err; | |
2631 | } | |
2632 | ||
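/*
 * Editor's note: a hedged sketch of the socket() call dispatched to
 * packet_create() above. CAP_NET_RAW is required, matching the
 * ns_capable() check, and the protocol travels in network byte order,
 * the "weird, but documented" convention noted in the code.
 */
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int open_packet_socket(void)
{
	/* SOCK_RAW keeps the link-layer header in place; SOCK_DGRAM
	 * would strip it, as the packet_rcv() paths above show */
	return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}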
ed85b565 RC |
2633 | static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len) |
2634 | { | |
2635 | struct sock_exterr_skb *serr; | |
2636 | struct sk_buff *skb, *skb2; | |
2637 | int copied, err; | |
2638 | ||
2639 | err = -EAGAIN; | |
2640 | skb = skb_dequeue(&sk->sk_error_queue); | |
2641 | if (skb == NULL) | |
2642 | goto out; | |
2643 | ||
2644 | copied = skb->len; | |
2645 | if (copied > len) { | |
2646 | msg->msg_flags |= MSG_TRUNC; | |
2647 | copied = len; | |
2648 | } | |
2649 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | |
2650 | if (err) | |
2651 | goto out_free_skb; | |
2652 | ||
2653 | sock_recv_timestamp(msg, sk, skb); | |
2654 | ||
2655 | serr = SKB_EXT_ERR(skb); | |
2656 | put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP, | |
2657 | sizeof(serr->ee), &serr->ee); | |
2658 | ||
2659 | msg->msg_flags |= MSG_ERRQUEUE; | |
2660 | err = copied; | |
2661 | ||
2662 | /* Reset and regenerate socket error */ | |
2663 | spin_lock_bh(&sk->sk_error_queue.lock); | |
2664 | sk->sk_err = 0; | |
2665 | if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { | |
2666 | sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; | |
2667 | spin_unlock_bh(&sk->sk_error_queue.lock); | |
2668 | sk->sk_error_report(sk); | |
2669 | } else | |
2670 | spin_unlock_bh(&sk->sk_error_queue.lock); | |
2671 | ||
2672 | out_free_skb: | |
2673 | kfree_skb(skb); | |
2674 | out: | |
2675 | return err; | |
2676 | } | |
2677 | ||
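/*
 * Editor's note: a hedged sketch of draining the error queue that
 * packet_recv_error() above services. It assumes TX timestamping was
 * enabled beforehand (SO_TIMESTAMPING); the control message follows
 * the put_cmsg(SOL_PACKET, PACKET_TX_TIMESTAMP, ...) call above.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>
#include <linux/errqueue.h>

static void drain_errqueue(int fd)
{
	unsigned char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov        = &iov,
		.msg_iovlen     = 1,
		.msg_control    = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_PACKET &&
		    cm->cmsg_type == PACKET_TX_TIMESTAMP) {
			struct sock_extended_err ee;

			memcpy(&ee, CMSG_DATA(cm), sizeof(ee));
			/* ee describes the queued TX event; the
			 * timestamp itself arrives in the companion
			 * SO_TIMESTAMPING control message */
		}
	}
}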
1da177e4 LT |
2678 | /* |
2679 | * Pull a packet from our receive queue and hand it to the user. | |
2680 | * If necessary we block. | |
2681 | */ | |
2682 | ||
2683 | static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |
2684 | struct msghdr *msg, size_t len, int flags) | |
2685 | { | |
2686 | struct sock *sk = sock->sk; | |
2687 | struct sk_buff *skb; | |
2688 | int copied, err; | |
0fb375fb | 2689 | struct sockaddr_ll *sll; |
bfd5f4a3 | 2690 | int vnet_hdr_len = 0; |
1da177e4 LT |
2691 | |
2692 | err = -EINVAL; | |
ed85b565 | 2693 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) |
1da177e4 LT |
2694 | goto out; |
2695 | ||
2696 | #if 0 | |
2697 | /* What error should we return now? EUNATTACH? */ | |
2698 | if (pkt_sk(sk)->ifindex < 0) | |
2699 | return -ENODEV; | |
2700 | #endif | |
2701 | ||
ed85b565 RC |
2702 | if (flags & MSG_ERRQUEUE) { |
2703 | err = packet_recv_error(sk, msg, len); | |
2704 | goto out; | |
2705 | } | |
2706 | ||
1da177e4 LT |
2707 | /* |
2708 | * Call the generic datagram receiver. This handles all sorts | |
2709 | * of horrible races and re-entrancy so we can forget about it | |
2710 | * in the protocol layers. | |
2711 | * | |
2712 | * Now it will return ENETDOWN, if the device has just gone down, |
2713 | * but then it will block. | |
2714 | */ | |
2715 | ||
40d4e3df | 2716 | skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); |
1da177e4 LT |
2717 | |
2718 | /* | |
1ce4f28b | 2719 | * An error occurred so return it. Because skb_recv_datagram() |
1da177e4 LT |
2720 | * handles the blocking, we don't have to see or worry about |
2721 | * blocking retries. |
2722 | */ | |
2723 | ||
8ae55f04 | 2724 | if (skb == NULL) |
1da177e4 LT |
2725 | goto out; |
2726 | ||
bfd5f4a3 SS |
2727 | if (pkt_sk(sk)->has_vnet_hdr) { |
2728 | struct virtio_net_hdr vnet_hdr = { 0 }; | |
2729 | ||
2730 | err = -EINVAL; | |
2731 | vnet_hdr_len = sizeof(vnet_hdr); | |
1f18b717 | 2732 | if (len < vnet_hdr_len) |
bfd5f4a3 SS |
2733 | goto out_free; |
2734 | ||
1f18b717 MK |
2735 | len -= vnet_hdr_len; |
2736 | ||
bfd5f4a3 SS |
2737 | if (skb_is_gso(skb)) { |
2738 | struct skb_shared_info *sinfo = skb_shinfo(skb); | |
2739 | ||
2740 | /* This is a hint as to how much should be linear. */ | |
2741 | vnet_hdr.hdr_len = skb_headlen(skb); | |
2742 | vnet_hdr.gso_size = sinfo->gso_size; | |
2743 | if (sinfo->gso_type & SKB_GSO_TCPV4) | |
2744 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | |
2745 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | |
2746 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | |
2747 | else if (sinfo->gso_type & SKB_GSO_UDP) | |
2748 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; | |
2749 | else if (sinfo->gso_type & SKB_GSO_FCOE) | |
2750 | goto out_free; | |
2751 | else | |
2752 | BUG(); | |
2753 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) | |
2754 | vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; | |
2755 | } else | |
2756 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; | |
2757 | ||
2758 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
2759 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; | |
55508d60 | 2760 | vnet_hdr.csum_start = skb_checksum_start_offset(skb); |
bfd5f4a3 | 2761 | vnet_hdr.csum_offset = skb->csum_offset; |
10a8d94a JW |
2762 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
2763 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; | |
bfd5f4a3 SS |
2764 | } /* else everything is zero */ |
2765 | ||
2766 | err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr, | |
2767 | vnet_hdr_len); | |
2768 | if (err < 0) | |
2769 | goto out_free; | |
2770 | } | |
2771 | ||
0fb375fb EB |
2772 | /* |
2773 | * If the address length field is there to be filled in, we fill | |
2774 | * it in now. | |
2775 | */ | |
2776 | ||
ffbc6111 | 2777 | sll = &PACKET_SKB_CB(skb)->sa.ll; |
0fb375fb EB |
2778 | if (sock->type == SOCK_PACKET) |
2779 | msg->msg_namelen = sizeof(struct sockaddr_pkt); | |
2780 | else | |
2781 | msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); | |
2782 | ||
1da177e4 LT |
2783 | /* |
2784 | * You lose any data beyond the buffer you gave. If it worries a | |
2785 | * user program, it can ask the device for its MTU anyway. |
2786 | */ | |
2787 | ||
2788 | copied = skb->len; | |
40d4e3df ED |
2789 | if (copied > len) { |
2790 | copied = len; | |
2791 | msg->msg_flags |= MSG_TRUNC; | |
1da177e4 LT |
2792 | } |
2793 | ||
2794 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | |
2795 | if (err) | |
2796 | goto out_free; | |
2797 | ||
3b885787 | 2798 | sock_recv_ts_and_drops(msg, sk, skb); |
1da177e4 LT |
2799 | |
2800 | if (msg->msg_name) | |
ffbc6111 HX |
2801 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, |
2802 | msg->msg_namelen); | |
1da177e4 | 2803 | |
8dc41944 | 2804 | if (pkt_sk(sk)->auxdata) { |
ffbc6111 HX |
2805 | struct tpacket_auxdata aux; |
2806 | ||
2807 | aux.tp_status = TP_STATUS_USER; | |
2808 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
2809 | aux.tp_status |= TP_STATUS_CSUMNOTREADY; | |
2810 | aux.tp_len = PACKET_SKB_CB(skb)->origlen; | |
2811 | aux.tp_snaplen = skb->len; | |
2812 | aux.tp_mac = 0; | |
bbe735e4 | 2813 | aux.tp_net = skb_network_offset(skb); |
a3bcc23e BG |
2814 | if (vlan_tx_tag_present(skb)) { |
2815 | aux.tp_vlan_tci = vlan_tx_tag_get(skb); | |
2816 | aux.tp_status |= TP_STATUS_VLAN_VALID; | |
2817 | } else { | |
2818 | aux.tp_vlan_tci = 0; | |
2819 | } | |
13fcb7bd | 2820 | aux.tp_padding = 0; |
ffbc6111 | 2821 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); |
8dc41944 HX |
2822 | } |
2823 | ||
1da177e4 LT |
2824 | /* |
2825 | * Free or return the buffer as appropriate. Again this | |
2826 | * hides all the races and re-entrancy issues from us. | |
2827 | */ | |
bfd5f4a3 | 2828 | err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied); |
1da177e4 LT |
2829 | |
2830 | out_free: | |
2831 | skb_free_datagram(sk, skb); | |
2832 | out: | |
2833 | return err; | |
2834 | } | |
2835 | ||
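/*
 * Editor's note: a hedged sketch of reading the PACKET_AUXDATA control
 * message that packet_recvmsg() above emits once the option is on;
 * the tp_status bits mirror what the ring paths report.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>

static void recv_with_auxdata(int fd)
{
	unsigned char data[2048];
	union {
		struct cmsghdr cm;
		char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	} ctrl;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov        = &iov,
		.msg_iovlen     = 1,
		.msg_control    = &ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	int one = 1;
	struct cmsghdr *cm;

	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_PACKET &&
		    cm->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata aux;

			memcpy(&aux, CMSG_DATA(cm), sizeof(aux));
			/* TP_STATUS_CSUMNOTREADY in aux.tp_status means
			 * the checksum was deferred to the hardware */
		}
	}
}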
1da177e4 LT |
2836 | static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, |
2837 | int *uaddr_len, int peer) | |
2838 | { | |
2839 | struct net_device *dev; | |
2840 | struct sock *sk = sock->sk; | |
2841 | ||
2842 | if (peer) | |
2843 | return -EOPNOTSUPP; | |
2844 | ||
2845 | uaddr->sa_family = AF_PACKET; | |
654d1f8a ED |
2846 | rcu_read_lock(); |
2847 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); | |
2848 | if (dev) | |
67286640 | 2849 | strncpy(uaddr->sa_data, dev->name, 14); |
654d1f8a | 2850 | else |
1da177e4 | 2851 | memset(uaddr->sa_data, 0, 14); |
654d1f8a | 2852 | rcu_read_unlock(); |
1da177e4 LT |
2853 | *uaddr_len = sizeof(*uaddr); |
2854 | ||
2855 | return 0; | |
2856 | } | |
1da177e4 LT |
2857 | |
2858 | static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |
2859 | int *uaddr_len, int peer) | |
2860 | { | |
2861 | struct net_device *dev; | |
2862 | struct sock *sk = sock->sk; | |
2863 | struct packet_sock *po = pkt_sk(sk); | |
13cfa97b | 2864 | DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); |
1da177e4 LT |
2865 | |
2866 | if (peer) | |
2867 | return -EOPNOTSUPP; | |
2868 | ||
2869 | sll->sll_family = AF_PACKET; | |
2870 | sll->sll_ifindex = po->ifindex; | |
2871 | sll->sll_protocol = po->num; | |
67286640 | 2872 | sll->sll_pkttype = 0; |
654d1f8a ED |
2873 | rcu_read_lock(); |
2874 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); | |
1da177e4 LT |
2875 | if (dev) { |
2876 | sll->sll_hatype = dev->type; | |
2877 | sll->sll_halen = dev->addr_len; | |
2878 | memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); | |
1da177e4 LT |
2879 | } else { |
2880 | sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ | |
2881 | sll->sll_halen = 0; | |
2882 | } | |
654d1f8a | 2883 | rcu_read_unlock(); |
0fb375fb | 2884 | *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; |
1da177e4 LT |
2885 | |
2886 | return 0; | |
2887 | } | |
2888 | ||
2aeb0b88 WC |
2889 | static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, |
2890 | int what) | |
1da177e4 LT |
2891 | { |
2892 | switch (i->type) { | |
2893 | case PACKET_MR_MULTICAST: | |
1162563f JP |
2894 | if (i->alen != dev->addr_len) |
2895 | return -EINVAL; | |
1da177e4 | 2896 | if (what > 0) |
22bedad3 | 2897 | return dev_mc_add(dev, i->addr); |
1da177e4 | 2898 | else |
22bedad3 | 2899 | return dev_mc_del(dev, i->addr); |
1da177e4 LT |
2900 | break; |
2901 | case PACKET_MR_PROMISC: | |
2aeb0b88 | 2902 | return dev_set_promiscuity(dev, what); |
1da177e4 LT |
2903 | break; |
2904 | case PACKET_MR_ALLMULTI: | |
2aeb0b88 | 2905 | return dev_set_allmulti(dev, what); |
1da177e4 | 2906 | break; |
d95ed927 | 2907 | case PACKET_MR_UNICAST: |
1162563f JP |
2908 | if (i->alen != dev->addr_len) |
2909 | return -EINVAL; | |
d95ed927 | 2910 | if (what > 0) |
a748ee24 | 2911 | return dev_uc_add(dev, i->addr); |
d95ed927 | 2912 | else |
a748ee24 | 2913 | return dev_uc_del(dev, i->addr); |
d95ed927 | 2914 | break; |
40d4e3df ED |
2915 | default: |
2916 | break; | |
1da177e4 | 2917 | } |
2aeb0b88 | 2918 | return 0; |
1da177e4 LT |
2919 | } |
2920 | ||
2921 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | |
2922 | { | |
40d4e3df | 2923 | for ( ; i; i = i->next) { |
1da177e4 LT |
2924 | if (i->ifindex == dev->ifindex) |
2925 | packet_dev_mc(dev, i, what); | |
2926 | } | |
2927 | } | |
2928 | ||
0fb375fb | 2929 | static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) |
1da177e4 LT |
2930 | { |
2931 | struct packet_sock *po = pkt_sk(sk); | |
2932 | struct packet_mclist *ml, *i; | |
2933 | struct net_device *dev; | |
2934 | int err; | |
2935 | ||
2936 | rtnl_lock(); | |
2937 | ||
2938 | err = -ENODEV; | |
3b1e0a65 | 2939 | dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); |
1da177e4 LT |
2940 | if (!dev) |
2941 | goto done; | |
2942 | ||
2943 | err = -EINVAL; | |
1162563f | 2944 | if (mreq->mr_alen > dev->addr_len) |
1da177e4 LT |
2945 | goto done; |
2946 | ||
2947 | err = -ENOBUFS; | |
8b3a7005 | 2948 | i = kmalloc(sizeof(*i), GFP_KERNEL); |
1da177e4 LT |
2949 | if (i == NULL) |
2950 | goto done; | |
2951 | ||
2952 | err = 0; | |
2953 | for (ml = po->mclist; ml; ml = ml->next) { | |
2954 | if (ml->ifindex == mreq->mr_ifindex && | |
2955 | ml->type == mreq->mr_type && | |
2956 | ml->alen == mreq->mr_alen && | |
2957 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | |
2958 | ml->count++; | |
2959 | /* Free the new element ... */ | |
2960 | kfree(i); | |
2961 | goto done; | |
2962 | } | |
2963 | } | |
2964 | ||
2965 | i->type = mreq->mr_type; | |
2966 | i->ifindex = mreq->mr_ifindex; | |
2967 | i->alen = mreq->mr_alen; | |
2968 | memcpy(i->addr, mreq->mr_address, i->alen); | |
2969 | i->count = 1; | |
2970 | i->next = po->mclist; | |
2971 | po->mclist = i; | |
2aeb0b88 WC |
2972 | err = packet_dev_mc(dev, i, 1); |
2973 | if (err) { | |
2974 | po->mclist = i->next; | |
2975 | kfree(i); | |
2976 | } | |
1da177e4 LT |
2977 | |
2978 | done: | |
2979 | rtnl_unlock(); | |
2980 | return err; | |
2981 | } | |
2982 | ||
0fb375fb | 2983 | static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) |
1da177e4 LT |
2984 | { |
2985 | struct packet_mclist *ml, **mlp; | |
2986 | ||
2987 | rtnl_lock(); | |
2988 | ||
2989 | for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { | |
2990 | if (ml->ifindex == mreq->mr_ifindex && | |
2991 | ml->type == mreq->mr_type && | |
2992 | ml->alen == mreq->mr_alen && | |
2993 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | |
2994 | if (--ml->count == 0) { | |
2995 | struct net_device *dev; | |
2996 | *mlp = ml->next; | |
ad959e76 ED |
2997 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
2998 | if (dev) | |
1da177e4 | 2999 | packet_dev_mc(dev, ml, -1); |
1da177e4 LT |
3000 | kfree(ml); |
3001 | } | |
3002 | rtnl_unlock(); | |
3003 | return 0; | |
3004 | } | |
3005 | } | |
3006 | rtnl_unlock(); | |
3007 | return -EADDRNOTAVAIL; | |
3008 | } | |
3009 | ||
3010 | static void packet_flush_mclist(struct sock *sk) | |
3011 | { | |
3012 | struct packet_sock *po = pkt_sk(sk); | |
3013 | struct packet_mclist *ml; | |
3014 | ||
3015 | if (!po->mclist) | |
3016 | return; | |
3017 | ||
3018 | rtnl_lock(); | |
3019 | while ((ml = po->mclist) != NULL) { | |
3020 | struct net_device *dev; | |
3021 | ||
3022 | po->mclist = ml->next; | |
ad959e76 ED |
3023 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
3024 | if (dev != NULL) | |
1da177e4 | 3025 | packet_dev_mc(dev, ml, -1); |
1da177e4 LT |
3026 | kfree(ml); |
3027 | } | |
3028 | rtnl_unlock(); | |
3029 | } | |
1da177e4 LT |
3030 | |
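/*
 * Editor's note: a hedged sketch of the setsockopt() path handled by
 * packet_mc_add() above. PACKET_MR_PROMISC needs no hardware address,
 * so only mr_ifindex and mr_type are filled in; the kernel refcounts
 * duplicates and ends up in dev_set_promiscuity().
 */
#include <string.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_packet.h>

static int enable_promisc(int fd, const char *ifname)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = if_nametoindex(ifname);
	mreq.mr_type    = PACKET_MR_PROMISC;

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}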
3031 | static int | |
b7058842 | 3032 | packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) |
1da177e4 LT |
3033 | { |
3034 | struct sock *sk = sock->sk; | |
8dc41944 | 3035 | struct packet_sock *po = pkt_sk(sk); |
1da177e4 LT |
3036 | int ret; |
3037 | ||
3038 | if (level != SOL_PACKET) | |
3039 | return -ENOPROTOOPT; | |
3040 | ||
69e3c75f | 3041 | switch (optname) { |
1ce4f28b | 3042 | case PACKET_ADD_MEMBERSHIP: |
1da177e4 LT |
3043 | case PACKET_DROP_MEMBERSHIP: |
3044 | { | |
0fb375fb EB |
3045 | struct packet_mreq_max mreq; |
3046 | int len = optlen; | |
3047 | memset(&mreq, 0, sizeof(mreq)); | |
3048 | if (len < sizeof(struct packet_mreq)) | |
1da177e4 | 3049 | return -EINVAL; |
0fb375fb EB |
3050 | if (len > sizeof(mreq)) |
3051 | len = sizeof(mreq); | |
40d4e3df | 3052 | if (copy_from_user(&mreq, optval, len)) |
1da177e4 | 3053 | return -EFAULT; |
0fb375fb EB |
3054 | if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) |
3055 | return -EINVAL; | |
1da177e4 LT |
3056 | if (optname == PACKET_ADD_MEMBERSHIP) |
3057 | ret = packet_mc_add(sk, &mreq); | |
3058 | else | |
3059 | ret = packet_mc_drop(sk, &mreq); | |
3060 | return ret; | |
3061 | } | |
a2efcfa0 | 3062 | |
1da177e4 | 3063 | case PACKET_RX_RING: |
69e3c75f | 3064 | case PACKET_TX_RING: |
1da177e4 | 3065 | { |
f6fb8f10 | 3066 | union tpacket_req_u req_u; |
3067 | int len; | |
1da177e4 | 3068 | |
f6fb8f10 | 3069 | switch (po->tp_version) { |
3070 | case TPACKET_V1: | |
3071 | case TPACKET_V2: | |
3072 | len = sizeof(req_u.req); | |
3073 | break; | |
3074 | case TPACKET_V3: | |
3075 | default: | |
3076 | len = sizeof(req_u.req3); | |
3077 | break; | |
3078 | } | |
3079 | if (optlen < len) | |
1da177e4 | 3080 | return -EINVAL; |
bfd5f4a3 SS |
3081 | if (pkt_sk(sk)->has_vnet_hdr) |
3082 | return -EINVAL; | |
f6fb8f10 | 3083 | if (copy_from_user(&req_u.req, optval, len)) |
1da177e4 | 3084 | return -EFAULT; |
f6fb8f10 | 3085 | return packet_set_ring(sk, &req_u, 0, |
3086 | optname == PACKET_TX_RING); | |
1da177e4 LT |
3087 | } |
3088 | case PACKET_COPY_THRESH: | |
3089 | { | |
3090 | int val; | |
3091 | ||
40d4e3df | 3092 | if (optlen != sizeof(val)) |
1da177e4 | 3093 | return -EINVAL; |
40d4e3df | 3094 | if (copy_from_user(&val, optval, sizeof(val))) |
1da177e4 LT |
3095 | return -EFAULT; |
3096 | ||
3097 | pkt_sk(sk)->copy_thresh = val; | |
3098 | return 0; | |
3099 | } | |
bbd6ef87 PM |
3100 | case PACKET_VERSION: |
3101 | { | |
3102 | int val; | |
3103 | ||
3104 | if (optlen != sizeof(val)) | |
3105 | return -EINVAL; | |
69e3c75f | 3106 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
bbd6ef87 PM |
3107 | return -EBUSY; |
3108 | if (copy_from_user(&val, optval, sizeof(val))) | |
3109 | return -EFAULT; | |
3110 | switch (val) { | |
3111 | case TPACKET_V1: | |
3112 | case TPACKET_V2: | |
f6fb8f10 | 3113 | case TPACKET_V3: |
bbd6ef87 PM |
3114 | po->tp_version = val; |
3115 | return 0; | |
3116 | default: | |
3117 | return -EINVAL; | |
3118 | } | |
3119 | } | |
8913336a PM |
3120 | case PACKET_RESERVE: |
3121 | { | |
3122 | unsigned int val; | |
3123 | ||
3124 | if (optlen != sizeof(val)) | |
3125 | return -EINVAL; | |
69e3c75f | 3126 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
8913336a PM |
3127 | return -EBUSY; |
3128 | if (copy_from_user(&val, optval, sizeof(val))) | |
3129 | return -EFAULT; | |
3130 | po->tp_reserve = val; | |
3131 | return 0; | |
3132 | } | |
69e3c75f JB |
3133 | case PACKET_LOSS: |
3134 | { | |
3135 | unsigned int val; | |
3136 | ||
3137 | if (optlen != sizeof(val)) | |
3138 | return -EINVAL; | |
3139 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3140 | return -EBUSY; | |
3141 | if (copy_from_user(&val, optval, sizeof(val))) | |
3142 | return -EFAULT; | |
3143 | po->tp_loss = !!val; | |
3144 | return 0; | |
3145 | } | |
8dc41944 HX |
3146 | case PACKET_AUXDATA: |
3147 | { | |
3148 | int val; | |
3149 | ||
3150 | if (optlen < sizeof(val)) | |
3151 | return -EINVAL; | |
3152 | if (copy_from_user(&val, optval, sizeof(val))) | |
3153 | return -EFAULT; | |
3154 | ||
3155 | po->auxdata = !!val; | |
3156 | return 0; | |
3157 | } | |
80feaacb PWJ |
3158 | case PACKET_ORIGDEV: |
3159 | { | |
3160 | int val; | |
3161 | ||
3162 | if (optlen < sizeof(val)) | |
3163 | return -EINVAL; | |
3164 | if (copy_from_user(&val, optval, sizeof(val))) | |
3165 | return -EFAULT; | |
3166 | ||
3167 | po->origdev = !!val; | |
3168 | return 0; | |
3169 | } | |
bfd5f4a3 SS |
3170 | case PACKET_VNET_HDR: |
3171 | { | |
3172 | int val; | |
3173 | ||
3174 | if (sock->type != SOCK_RAW) | |
3175 | return -EINVAL; | |
3176 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3177 | return -EBUSY; | |
3178 | if (optlen < sizeof(val)) | |
3179 | return -EINVAL; | |
3180 | if (copy_from_user(&val, optval, sizeof(val))) | |
3181 | return -EFAULT; | |
3182 | ||
3183 | po->has_vnet_hdr = !!val; | |
3184 | return 0; | |
3185 | } | |
614f60fa SM |
3186 | case PACKET_TIMESTAMP: |
3187 | { | |
3188 | int val; | |
3189 | ||
3190 | if (optlen != sizeof(val)) | |
3191 | return -EINVAL; | |
3192 | if (copy_from_user(&val, optval, sizeof(val))) | |
3193 | return -EFAULT; | |
3194 | ||
3195 | po->tp_tstamp = val; | |
3196 | return 0; | |
3197 | } | |
dc99f600 DM |
3198 | case PACKET_FANOUT: |
3199 | { | |
3200 | int val; | |
3201 | ||
3202 | if (optlen != sizeof(val)) | |
3203 | return -EINVAL; | |
3204 | if (copy_from_user(&val, optval, sizeof(val))) | |
3205 | return -EFAULT; | |
3206 | ||
3207 | return fanout_add(sk, val & 0xffff, val >> 16); | |
3208 | } | |
5920cd3a PC |
3209 | case PACKET_TX_HAS_OFF: |
3210 | { | |
3211 | unsigned int val; | |
3212 | ||
3213 | if (optlen != sizeof(val)) | |
3214 | return -EINVAL; | |
3215 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3216 | return -EBUSY; | |
3217 | if (copy_from_user(&val, optval, sizeof(val))) | |
3218 | return -EFAULT; | |
3219 | po->tp_tx_has_off = !!val; | |
3220 | return 0; | |
3221 | } | |
1da177e4 LT |
3222 | default: |
3223 | return -ENOPROTOOPT; | |
3224 | } | |
3225 | } | |
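A minimal user-space sketch of driving the setsockopt() paths above, assuming the usual uapi headers; the interface name and helper name are illustrative only:

#include <sys/socket.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>

static int enable_aux_and_promisc(int fd, const char *ifname)
{
	int one = 1;
	struct packet_mreq mreq;

	/* lands in the PACKET_AUXDATA case: po->auxdata = !!val */
	if (setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one)) < 0)
		return -1;

	/* lands in PACKET_ADD_MEMBERSHIP and calls packet_mc_add() */
	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = if_nametoindex(ifname);
	mreq.mr_type = PACKET_MR_PROMISC;
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}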
3226 | ||
3227 | static int packet_getsockopt(struct socket *sock, int level, int optname, | |
3228 | char __user *optval, int __user *optlen) | |
3229 | { | |
3230 | int len; | |
c06fff6e | 3231 | int val, lv = sizeof(val); |
1da177e4 LT |
3232 | struct sock *sk = sock->sk; |
3233 | struct packet_sock *po = pkt_sk(sk); | |
c06fff6e | 3234 | void *data = &val; |
8dc41944 | 3235 | struct tpacket_stats st; |
f6fb8f10 | 3236 | union tpacket_stats_u st_u; |
1da177e4 LT |
3237 | |
3238 | if (level != SOL_PACKET) | |
3239 | return -ENOPROTOOPT; | |
3240 | ||
8ae55f04 KK |
3241 | if (get_user(len, optlen)) |
3242 | return -EFAULT; | |
1da177e4 LT |
3243 | |
3244 | if (len < 0) | |
3245 | return -EINVAL; | |
1ce4f28b | 3246 | |
69e3c75f | 3247 | switch (optname) { |
1da177e4 | 3248 | case PACKET_STATISTICS: |
1da177e4 | 3249 | spin_lock_bh(&sk->sk_receive_queue.lock); |
f6fb8f10 | 3250 | if (po->tp_version == TPACKET_V3) { |
c06fff6e | 3251 | lv = sizeof(struct tpacket_stats_v3); |
f6fb8f10 | 3252 | memcpy(&st_u.stats3, &po->stats, |
c06fff6e | 3253 | sizeof(struct tpacket_stats)); |
f6fb8f10 | 3254 | st_u.stats3.tp_freeze_q_cnt = |
c06fff6e | 3255 | po->stats_u.stats3.tp_freeze_q_cnt; |
f6fb8f10 | 3256 | st_u.stats3.tp_packets += po->stats.tp_drops; |
3257 | data = &st_u.stats3; | |
3258 | } else { | |
c06fff6e | 3259 | lv = sizeof(struct tpacket_stats); |
f6fb8f10 | 3260 | st = po->stats; |
3261 | st.tp_packets += st.tp_drops; | |
3262 | data = &st; | |
3263 | } | |
1da177e4 LT |
3264 | memset(&po->stats, 0, sizeof(st)); |
3265 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
8dc41944 HX |
3266 | break; |
3267 | case PACKET_AUXDATA: | |
8dc41944 | 3268 | val = po->auxdata; |
80feaacb PWJ |
3269 | break; |
3270 | case PACKET_ORIGDEV: | |
80feaacb | 3271 | val = po->origdev; |
bfd5f4a3 SS |
3272 | break; |
3273 | case PACKET_VNET_HDR: | |
bfd5f4a3 | 3274 | val = po->has_vnet_hdr; |
1da177e4 | 3275 | break; |
bbd6ef87 | 3276 | case PACKET_VERSION: |
bbd6ef87 | 3277 | val = po->tp_version; |
bbd6ef87 PM |
3278 | break; |
3279 | case PACKET_HDRLEN: | |
3280 | if (len > sizeof(int)) | |
3281 | len = sizeof(int); | |
3282 | if (copy_from_user(&val, optval, len)) | |
3283 | return -EFAULT; | |
3284 | switch (val) { | |
3285 | case TPACKET_V1: | |
3286 | val = sizeof(struct tpacket_hdr); | |
3287 | break; | |
3288 | case TPACKET_V2: | |
3289 | val = sizeof(struct tpacket2_hdr); | |
3290 | break; | |
f6fb8f10 | 3291 | case TPACKET_V3: |
3292 | val = sizeof(struct tpacket3_hdr); | |
3293 | break; | |
bbd6ef87 PM |
3294 | default: |
3295 | return -EINVAL; | |
3296 | } | |
bbd6ef87 | 3297 | break; |
8913336a | 3298 | case PACKET_RESERVE: |
8913336a | 3299 | val = po->tp_reserve; |
8913336a | 3300 | break; |
69e3c75f | 3301 | case PACKET_LOSS: |
69e3c75f | 3302 | val = po->tp_loss; |
69e3c75f | 3303 | break; |
614f60fa | 3304 | case PACKET_TIMESTAMP: |
614f60fa | 3305 | val = po->tp_tstamp; |
614f60fa | 3306 | break; |
dc99f600 | 3307 | case PACKET_FANOUT: |
dc99f600 DM |
3308 | val = (po->fanout ? |
3309 | ((u32)po->fanout->id | | |
77f65ebd WB |
3310 | ((u32)po->fanout->type << 16) | |
3311 | ((u32)po->fanout->flags << 24)) : | |
dc99f600 | 3312 | 0); |
dc99f600 | 3313 | break; |
5920cd3a PC |
3314 | case PACKET_TX_HAS_OFF: |
3315 | val = po->tp_tx_has_off; | |
3316 | break; | |
1da177e4 LT |
3317 | default: |
3318 | return -ENOPROTOOPT; | |
3319 | } | |
3320 | ||
c06fff6e ED |
3321 | if (len > lv) |
3322 | len = lv; | |
8ae55f04 KK |
3323 | if (put_user(len, optlen)) |
3324 | return -EFAULT; | |
8dc41944 HX |
3325 | if (copy_to_user(optval, data, len)) |
3326 | return -EFAULT; | |
8ae55f04 | 3327 | return 0; |
1da177e4 LT |
3328 | } |
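A hedged user-space counterpart for the getsockopt() path above, reading PACKET_STATISTICS on a TPACKET_V1/V2 socket. Two behaviors are visible in the code: tp_drops is folded into tp_packets before the copy, and the kernel memset()s its counters on every read, so each call returns deltas since the previous one:

#include <sys/socket.h>
#include <linux/if_packet.h>
#include <stdio.h>

static void print_packet_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("packets %u (drops %u)\n", st.tp_packets, st.tp_drops);
}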
3329 | ||
3330 | ||
3331 | static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) | |
3332 | { | |
3333 | struct sock *sk; | |
ad930650 | 3334 | struct net_device *dev = data; |
c346dca1 | 3335 | struct net *net = dev_net(dev); |
1da177e4 | 3336 | |
808f5114 | 3337 | rcu_read_lock(); |
b67bfe0d | 3338 | sk_for_each_rcu(sk, &net->packet.sklist) { |
1da177e4 LT |
3339 | struct packet_sock *po = pkt_sk(sk); |
3340 | ||
3341 | switch (msg) { | |
3342 | case NETDEV_UNREGISTER: | |
1da177e4 LT |
3343 | if (po->mclist) |
3344 | packet_dev_mclist(dev, po->mclist, -1); | |
a2efcfa0 DM |
3345 | /* fallthrough */ |
3346 | ||
1da177e4 LT |
3347 | case NETDEV_DOWN: |
3348 | if (dev->ifindex == po->ifindex) { | |
3349 | spin_lock(&po->bind_lock); | |
3350 | if (po->running) { | |
ce06b03e | 3351 | __unregister_prot_hook(sk, false); |
1da177e4 LT |
3352 | sk->sk_err = ENETDOWN; |
3353 | if (!sock_flag(sk, SOCK_DEAD)) | |
3354 | sk->sk_error_report(sk); | |
3355 | } | |
3356 | if (msg == NETDEV_UNREGISTER) { | |
3357 | po->ifindex = -1; | |
160ff18a BG |
3358 | if (po->prot_hook.dev) |
3359 | dev_put(po->prot_hook.dev); | |
1da177e4 LT |
3360 | po->prot_hook.dev = NULL; |
3361 | } | |
3362 | spin_unlock(&po->bind_lock); | |
3363 | } | |
3364 | break; | |
3365 | case NETDEV_UP: | |
808f5114 | 3366 | if (dev->ifindex == po->ifindex) { |
3367 | spin_lock(&po->bind_lock); | |
ce06b03e DM |
3368 | if (po->num) |
3369 | register_prot_hook(sk); | |
808f5114 | 3370 | spin_unlock(&po->bind_lock); |
1da177e4 | 3371 | } |
1da177e4 LT |
3372 | break; |
3373 | } | |
3374 | } | |
808f5114 | 3375 | rcu_read_unlock(); |
1da177e4 LT |
3376 | return NOTIFY_DONE; |
3377 | } | |
3378 | ||
3379 | ||
3380 | static int packet_ioctl(struct socket *sock, unsigned int cmd, | |
3381 | unsigned long arg) | |
3382 | { | |
3383 | struct sock *sk = sock->sk; | |
3384 | ||
69e3c75f | 3385 | switch (cmd) { |
40d4e3df ED |
3386 | case SIOCOUTQ: |
3387 | { | |
3388 | int amount = sk_wmem_alloc_get(sk); | |
31e6d363 | 3389 | |
40d4e3df ED |
3390 | return put_user(amount, (int __user *)arg); |
3391 | } | |
3392 | case SIOCINQ: | |
3393 | { | |
3394 | struct sk_buff *skb; | |
3395 | int amount = 0; | |
3396 | ||
3397 | spin_lock_bh(&sk->sk_receive_queue.lock); | |
3398 | skb = skb_peek(&sk->sk_receive_queue); | |
3399 | if (skb) | |
3400 | amount = skb->len; | |
3401 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
3402 | return put_user(amount, (int __user *)arg); | |
3403 | } | |
3404 | case SIOCGSTAMP: | |
3405 | return sock_get_timestamp(sk, (struct timeval __user *)arg); | |
3406 | case SIOCGSTAMPNS: | |
3407 | return sock_get_timestampns(sk, (struct timespec __user *)arg); | |
1ce4f28b | 3408 | |
1da177e4 | 3409 | #ifdef CONFIG_INET |
40d4e3df ED |
3410 | case SIOCADDRT: |
3411 | case SIOCDELRT: | |
3412 | case SIOCDARP: | |
3413 | case SIOCGARP: | |
3414 | case SIOCSARP: | |
3415 | case SIOCGIFADDR: | |
3416 | case SIOCSIFADDR: | |
3417 | case SIOCGIFBRDADDR: | |
3418 | case SIOCSIFBRDADDR: | |
3419 | case SIOCGIFNETMASK: | |
3420 | case SIOCSIFNETMASK: | |
3421 | case SIOCGIFDSTADDR: | |
3422 | case SIOCSIFDSTADDR: | |
3423 | case SIOCSIFFLAGS: | |
40d4e3df | 3424 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
1da177e4 LT |
3425 | #endif |
3426 | ||
40d4e3df ED |
3427 | default: |
3428 | return -ENOIOCTLCMD; | |
1da177e4 LT |
3429 | } |
3430 | return 0; | |
3431 | } | |
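As a usage sketch (names illustrative), SIOCINQ on a packet socket reports the length of the frame at the head of the receive queue, matching the skb_peek() logic above, and 0 when the queue is empty:

#include <sys/ioctl.h>
#include <linux/sockios.h>

static int next_frame_len(int fd)
{
	int len = 0;

	if (ioctl(fd, SIOCINQ, &len) < 0)
		return -1;
	return len;
}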
3432 | ||
40d4e3df | 3433 | static unsigned int packet_poll(struct file *file, struct socket *sock, |
1da177e4 LT |
3434 | poll_table *wait) |
3435 | { | |
3436 | struct sock *sk = sock->sk; | |
3437 | struct packet_sock *po = pkt_sk(sk); | |
3438 | unsigned int mask = datagram_poll(file, sock, wait); | |
3439 | ||
3440 | spin_lock_bh(&sk->sk_receive_queue.lock); | |
69e3c75f | 3441 | if (po->rx_ring.pg_vec) { |
f6fb8f10 | 3442 | if (!packet_previous_rx_frame(po, &po->rx_ring, |
3443 | TP_STATUS_KERNEL)) | |
1da177e4 LT |
3444 | mask |= POLLIN | POLLRDNORM; |
3445 | } | |
3446 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
69e3c75f JB |
3447 | spin_lock_bh(&sk->sk_write_queue.lock); |
3448 | if (po->tx_ring.pg_vec) { | |
3449 | if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) | |
3450 | mask |= POLLOUT | POLLWRNORM; | |
3451 | } | |
3452 | spin_unlock_bh(&sk->sk_write_queue.lock); | |
1da177e4 LT |
3453 | return mask; |
3454 | } | |
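The matching user-space wait, as a minimal sketch: poll() reports POLLIN once packet_poll() finds the previous RX frame no longer marked TP_STATUS_KERNEL (i.e. at least one frame is user-owned), and POLLOUT while a TX frame is TP_STATUS_AVAILABLE:

#include <poll.h>

static int wait_for_rx_frame(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };

	return poll(&pfd, 1, -1);	/* block until a ring frame is ready */
}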
3455 | ||
3456 | ||
3457 | /* Dirty? Well, I still did not learn a better way to account | |
3458 | * for user mmaps. | |
3459 | */ | |
3460 | ||
3461 | static void packet_mm_open(struct vm_area_struct *vma) | |
3462 | { | |
3463 | struct file *file = vma->vm_file; | |
40d4e3df | 3464 | struct socket *sock = file->private_data; |
1da177e4 | 3465 | struct sock *sk = sock->sk; |
1ce4f28b | 3466 | |
1da177e4 LT |
3467 | if (sk) |
3468 | atomic_inc(&pkt_sk(sk)->mapped); | |
3469 | } | |
3470 | ||
3471 | static void packet_mm_close(struct vm_area_struct *vma) | |
3472 | { | |
3473 | struct file *file = vma->vm_file; | |
40d4e3df | 3474 | struct socket *sock = file->private_data; |
1da177e4 | 3475 | struct sock *sk = sock->sk; |
1ce4f28b | 3476 | |
1da177e4 LT |
3477 | if (sk) |
3478 | atomic_dec(&pkt_sk(sk)->mapped); | |
3479 | } | |
3480 | ||
f0f37e2f | 3481 | static const struct vm_operations_struct packet_mmap_ops = { |
40d4e3df ED |
3482 | .open = packet_mm_open, |
3483 | .close = packet_mm_close, | |
1da177e4 LT |
3484 | }; |
3485 | ||
0e3125c7 NH |
3486 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
3487 | unsigned int len) | |
1da177e4 LT |
3488 | { |
3489 | int i; | |
3490 | ||
4ebf0ae2 | 3491 | for (i = 0; i < len; i++) { |
0e3125c7 | 3492 | if (likely(pg_vec[i].buffer)) { |
c56b4d90 | 3493 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
0e3125c7 NH |
3494 | vfree(pg_vec[i].buffer); |
3495 | else | |
3496 | free_pages((unsigned long)pg_vec[i].buffer, | |
3497 | order); | |
3498 | pg_vec[i].buffer = NULL; | |
3499 | } | |
1da177e4 LT |
3500 | } |
3501 | kfree(pg_vec); | |
3502 | } | |
3503 | ||
eea49cc9 | 3504 | static char *alloc_one_pg_vec_page(unsigned long order) |
4ebf0ae2 | 3505 | { |
0e3125c7 NH |
3506 | char *buffer = NULL; |
3507 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | | |
3508 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; | |
3509 | ||
3510 | buffer = (char *) __get_free_pages(gfp_flags, order); | |
3511 | ||
3512 | if (buffer) | |
3513 | return buffer; | |
3514 | ||
3515 | /* | |
3516 | * __get_free_pages failed, fall back to vmalloc | |
3517 | */ | |
bbce5a59 | 3518 | buffer = vzalloc((1 << order) * PAGE_SIZE); |
719bfeaa | 3519 | |
0e3125c7 NH |
3520 | if (buffer) |
3521 | return buffer; | |
3522 | ||
3523 | /* | |
3524 | * vmalloc failed, let's dig into swap here | |
3525 | */ | |
0e3125c7 NH |
3526 | gfp_flags &= ~__GFP_NORETRY; |
3527 | buffer = (char *)__get_free_pages(gfp_flags, order); | |
3528 | if (buffer) | |
3529 | return buffer; | |
3530 | ||
3531 | /* | |
3532 | * complete and utter failure | |
3533 | */ | |
3534 | return NULL; | |
4ebf0ae2 DM |
3535 | } |
3536 | ||
0e3125c7 | 3537 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
4ebf0ae2 DM |
3538 | { |
3539 | unsigned int block_nr = req->tp_block_nr; | |
0e3125c7 | 3540 | struct pgv *pg_vec; |
4ebf0ae2 DM |
3541 | int i; |
3542 | ||
0e3125c7 | 3543 | pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); |
4ebf0ae2 DM |
3544 | if (unlikely(!pg_vec)) |
3545 | goto out; | |
3546 | ||
3547 | for (i = 0; i < block_nr; i++) { | |
c56b4d90 | 3548 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
0e3125c7 | 3549 | if (unlikely(!pg_vec[i].buffer)) |
4ebf0ae2 DM |
3550 | goto out_free_pgvec; |
3551 | } | |
3552 | ||
3553 | out: | |
3554 | return pg_vec; | |
3555 | ||
3556 | out_free_pgvec: | |
3557 | free_pg_vec(pg_vec, order, block_nr); | |
3558 | pg_vec = NULL; | |
3559 | goto out; | |
3560 | } | |
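For scale, each pg_vec entry is one block of (1 << order) * PAGE_SIZE bytes with order = get_order(tp_block_size), so the total ring that user space must later map is simply blocks times block size; a small illustrative helper:

#include <stddef.h>
#include <linux/if_packet.h>

static size_t ring_bytes(const struct tpacket_req *req)
{
	/* must match expected_size computed in packet_mmap() below */
	return (size_t)req->tp_block_nr * req->tp_block_size;
}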
1da177e4 | 3561 | |
f6fb8f10 | 3562 | static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
69e3c75f | 3563 | int closing, int tx_ring) |
1da177e4 | 3564 | { |
0e3125c7 | 3565 | struct pgv *pg_vec = NULL; |
1da177e4 | 3566 | struct packet_sock *po = pkt_sk(sk); |
0e11c91e | 3567 | int was_running, order = 0; |
69e3c75f JB |
3568 | struct packet_ring_buffer *rb; |
3569 | struct sk_buff_head *rb_queue; | |
0e11c91e | 3570 | __be16 num; |
f6fb8f10 | 3571 | int err = -EINVAL; |
3572 | /* Added to minimize code churn */ | |
3573 | struct tpacket_req *req = &req_u->req; | |
3574 | ||
3575 | /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ | |
3576 | if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { | |
3577 | WARN(1, "Tx-ring is not supported.\n"); | |
3578 | goto out; | |
3579 | } | |
1ce4f28b | 3580 | |
69e3c75f JB |
3581 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; |
3582 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; | |
1da177e4 | 3583 | |
69e3c75f JB |
3584 | err = -EBUSY; |
3585 | if (!closing) { | |
3586 | if (atomic_read(&po->mapped)) | |
3587 | goto out; | |
3588 | if (atomic_read(&rb->pending)) | |
3589 | goto out; | |
3590 | } | |
1da177e4 | 3591 | |
69e3c75f JB |
3592 | if (req->tp_block_nr) { |
3593 | /* Sanity tests and some calculations */ | |
3594 | err = -EBUSY; | |
3595 | if (unlikely(rb->pg_vec)) | |
3596 | goto out; | |
1da177e4 | 3597 | |
bbd6ef87 PM |
3598 | switch (po->tp_version) { |
3599 | case TPACKET_V1: | |
3600 | po->tp_hdrlen = TPACKET_HDRLEN; | |
3601 | break; | |
3602 | case TPACKET_V2: | |
3603 | po->tp_hdrlen = TPACKET2_HDRLEN; | |
3604 | break; | |
f6fb8f10 | 3605 | case TPACKET_V3: |
3606 | po->tp_hdrlen = TPACKET3_HDRLEN; | |
3607 | break; | |
bbd6ef87 PM |
3608 | } |
3609 | ||
69e3c75f | 3610 | err = -EINVAL; |
4ebf0ae2 | 3611 | if (unlikely((int)req->tp_block_size <= 0)) |
69e3c75f | 3612 | goto out; |
4ebf0ae2 | 3613 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
69e3c75f | 3614 | goto out; |
8913336a | 3615 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
69e3c75f JB |
3616 | po->tp_reserve)) |
3617 | goto out; | |
4ebf0ae2 | 3618 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) |
69e3c75f | 3619 | goto out; |
1da177e4 | 3620 | |
69e3c75f JB |
3621 | rb->frames_per_block = req->tp_block_size/req->tp_frame_size; |
3622 | if (unlikely(rb->frames_per_block <= 0)) | |
3623 | goto out; | |
3624 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != | |
3625 | req->tp_frame_nr)) | |
3626 | goto out; | |
1da177e4 LT |
3627 | |
3628 | err = -ENOMEM; | |
4ebf0ae2 DM |
3629 | order = get_order(req->tp_block_size); |
3630 | pg_vec = alloc_pg_vec(req, order); | |
3631 | if (unlikely(!pg_vec)) | |
1da177e4 | 3632 | goto out; |
f6fb8f10 | 3633 | switch (po->tp_version) { |
3634 | case TPACKET_V3: | |
3635 | /* Transmit path is not supported. We checked | |
3636 | * it above, but recheck here to be paranoid. | |
3637 | */ | |
3638 | if (!tx_ring) | |
3639 | init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); | |
3640 | break; | |
3641 | default: | |
3642 | break; | |
3643 | } | |
69e3c75f JB |
3644 | } |
3645 | /* Done */ | |
3646 | else { | |
3647 | err = -EINVAL; | |
4ebf0ae2 | 3648 | if (unlikely(req->tp_frame_nr)) |
69e3c75f | 3649 | goto out; |
1da177e4 LT |
3650 | } |
3651 | ||
3652 | lock_sock(sk); | |
3653 | ||
3654 | /* Detach socket from network */ | |
3655 | spin_lock(&po->bind_lock); | |
3656 | was_running = po->running; | |
3657 | num = po->num; | |
3658 | if (was_running) { | |
1da177e4 | 3659 | po->num = 0; |
ce06b03e | 3660 | __unregister_prot_hook(sk, false); |
1da177e4 LT |
3661 | } |
3662 | spin_unlock(&po->bind_lock); | |
1ce4f28b | 3663 | |
1da177e4 LT |
3664 | synchronize_net(); |
3665 | ||
3666 | err = -EBUSY; | |
905db440 | 3667 | mutex_lock(&po->pg_vec_lock); |
1da177e4 LT |
3668 | if (closing || atomic_read(&po->mapped) == 0) { |
3669 | err = 0; | |
69e3c75f | 3670 | spin_lock_bh(&rb_queue->lock); |
c053fd96 | 3671 | swap(rb->pg_vec, pg_vec); |
69e3c75f JB |
3672 | rb->frame_max = (req->tp_frame_nr - 1); |
3673 | rb->head = 0; | |
3674 | rb->frame_size = req->tp_frame_size; | |
3675 | spin_unlock_bh(&rb_queue->lock); | |
3676 | ||
c053fd96 CG |
3677 | swap(rb->pg_vec_order, order); |
3678 | swap(rb->pg_vec_len, req->tp_block_nr); | |
69e3c75f JB |
3679 | |
3680 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; | |
3681 | po->prot_hook.func = (po->rx_ring.pg_vec) ? | |
3682 | tpacket_rcv : packet_rcv; | |
3683 | skb_queue_purge(rb_queue); | |
1da177e4 | 3684 | if (atomic_read(&po->mapped)) |
40d4e3df ED |
3685 | pr_err("packet_mmap: vma is busy: %d\n", |
3686 | atomic_read(&po->mapped)); | |
1da177e4 | 3687 | } |
905db440 | 3688 | mutex_unlock(&po->pg_vec_lock); |
1da177e4 LT |
3689 | |
3690 | spin_lock(&po->bind_lock); | |
ce06b03e | 3691 | if (was_running) { |
1da177e4 | 3692 | po->num = num; |
ce06b03e | 3693 | register_prot_hook(sk); |
1da177e4 LT |
3694 | } |
3695 | spin_unlock(&po->bind_lock); | |
f6fb8f10 | 3696 | if (closing && (po->tp_version > TPACKET_V2)) { |
3697 | /* Because we don't support block-based V3 on tx-ring */ | |
3698 | if (!tx_ring) | |
3699 | prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue); | |
3700 | } | |
1da177e4 LT |
3701 | release_sock(sk); |
3702 | ||
1da177e4 LT |
3703 | if (pg_vec) |
3704 | free_pg_vec(pg_vec, order, req->tp_block_nr); | |
3705 | out: | |
3706 | return err; | |
3707 | } | |
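A minimal user-space sketch of configuring a TPACKET_V2 RX ring through this path; the geometry is illustrative but satisfies the checks above on a 4 KiB-page system (block size page-aligned, frame size a TPACKET_ALIGNMENT multiple, and tp_frame_nr equal to frames_per_block * tp_block_nr):

#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static int setup_rx_ring(int fd)
{
	int ver = TPACKET_V2;
	struct tpacket_req req;

	/* PACKET_VERSION fails with EBUSY once a ring exists */
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.tp_block_size = 4096;	/* one page per block */
	req.tp_frame_size = 2048;	/* 2 frames per block */
	req.tp_block_nr = 64;
	req.tp_frame_nr = 128;		/* 2 * 64 */
	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
			  &req, sizeof(req));
}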
3708 | ||
69e3c75f JB |
3709 | static int packet_mmap(struct file *file, struct socket *sock, |
3710 | struct vm_area_struct *vma) | |
1da177e4 LT |
3711 | { |
3712 | struct sock *sk = sock->sk; | |
3713 | struct packet_sock *po = pkt_sk(sk); | |
69e3c75f JB |
3714 | unsigned long size, expected_size; |
3715 | struct packet_ring_buffer *rb; | |
1da177e4 LT |
3716 | unsigned long start; |
3717 | int err = -EINVAL; | |
3718 | int i; | |
3719 | ||
3720 | if (vma->vm_pgoff) | |
3721 | return -EINVAL; | |
3722 | ||
905db440 | 3723 | mutex_lock(&po->pg_vec_lock); |
69e3c75f JB |
3724 | |
3725 | expected_size = 0; | |
3726 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { | |
3727 | if (rb->pg_vec) { | |
3728 | expected_size += rb->pg_vec_len | |
3729 | * rb->pg_vec_pages | |
3730 | * PAGE_SIZE; | |
3731 | } | |
3732 | } | |
3733 | ||
3734 | if (expected_size == 0) | |
1da177e4 | 3735 | goto out; |
69e3c75f JB |
3736 | |
3737 | size = vma->vm_end - vma->vm_start; | |
3738 | if (size != expected_size) | |
1da177e4 LT |
3739 | goto out; |
3740 | ||
1da177e4 | 3741 | start = vma->vm_start; |
69e3c75f JB |
3742 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { |
3743 | if (rb->pg_vec == NULL) | |
3744 | continue; | |
3745 | ||
3746 | for (i = 0; i < rb->pg_vec_len; i++) { | |
0e3125c7 NH |
3747 | struct page *page; |
3748 | void *kaddr = rb->pg_vec[i].buffer; | |
69e3c75f JB |
3749 | int pg_num; |
3750 | ||
c56b4d90 CG |
3751 | for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { |
3752 | page = pgv_to_page(kaddr); | |
69e3c75f JB |
3753 | err = vm_insert_page(vma, start, page); |
3754 | if (unlikely(err)) | |
3755 | goto out; | |
3756 | start += PAGE_SIZE; | |
0e3125c7 | 3757 | kaddr += PAGE_SIZE; |
69e3c75f | 3758 | } |
4ebf0ae2 | 3759 | } |
1da177e4 | 3760 | } |
69e3c75f | 3761 | |
4ebf0ae2 | 3762 | atomic_inc(&po->mapped); |
1da177e4 LT |
3763 | vma->vm_ops = &packet_mmap_ops; |
3764 | err = 0; | |
3765 | ||
3766 | out: | |
905db440 | 3767 | mutex_unlock(&po->pg_vec_lock); |
1da177e4 LT |
3768 | return err; |
3769 | } | |
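The corresponding user-space mapping, sketched under the same assumptions: vm_pgoff must be zero and the length must exactly equal the ring size, or the checks above return -EINVAL:

#include <stddef.h>
#include <sys/mman.h>
#include <linux/if_packet.h>

static void *map_ring(int fd, const struct tpacket_req *req)
{
	size_t len = (size_t)req->tp_block_nr * req->tp_block_size;

	/* one contiguous mapping covering RX (and TX, if configured) */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}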
1da177e4 | 3770 | |
90ddc4f0 | 3771 | static const struct proto_ops packet_ops_spkt = { |
1da177e4 LT |
3772 | .family = PF_PACKET, |
3773 | .owner = THIS_MODULE, | |
3774 | .release = packet_release, | |
3775 | .bind = packet_bind_spkt, | |
3776 | .connect = sock_no_connect, | |
3777 | .socketpair = sock_no_socketpair, | |
3778 | .accept = sock_no_accept, | |
3779 | .getname = packet_getname_spkt, | |
3780 | .poll = datagram_poll, | |
3781 | .ioctl = packet_ioctl, | |
3782 | .listen = sock_no_listen, | |
3783 | .shutdown = sock_no_shutdown, | |
3784 | .setsockopt = sock_no_setsockopt, | |
3785 | .getsockopt = sock_no_getsockopt, | |
3786 | .sendmsg = packet_sendmsg_spkt, | |
3787 | .recvmsg = packet_recvmsg, | |
3788 | .mmap = sock_no_mmap, | |
3789 | .sendpage = sock_no_sendpage, | |
3790 | }; | |
1da177e4 | 3791 | |
90ddc4f0 | 3792 | static const struct proto_ops packet_ops = { |
1da177e4 LT |
3793 | .family = PF_PACKET, |
3794 | .owner = THIS_MODULE, | |
3795 | .release = packet_release, | |
3796 | .bind = packet_bind, | |
3797 | .connect = sock_no_connect, | |
3798 | .socketpair = sock_no_socketpair, | |
3799 | .accept = sock_no_accept, | |
1ce4f28b | 3800 | .getname = packet_getname, |
1da177e4 LT |
3801 | .poll = packet_poll, |
3802 | .ioctl = packet_ioctl, | |
3803 | .listen = sock_no_listen, | |
3804 | .shutdown = sock_no_shutdown, | |
3805 | .setsockopt = packet_setsockopt, | |
3806 | .getsockopt = packet_getsockopt, | |
3807 | .sendmsg = packet_sendmsg, | |
3808 | .recvmsg = packet_recvmsg, | |
3809 | .mmap = packet_mmap, | |
3810 | .sendpage = sock_no_sendpage, | |
3811 | }; | |
3812 | ||
ec1b4cf7 | 3813 | static const struct net_proto_family packet_family_ops = { |
1da177e4 LT |
3814 | .family = PF_PACKET, |
3815 | .create = packet_create, | |
3816 | .owner = THIS_MODULE, | |
3817 | }; | |
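Socket creation dispatches through this table; a one-line user-space sketch (packet_create(), not shown in this section, additionally requires CAP_NET_RAW and selects packet_ops_spkt only for the legacy SOCK_PACKET type):

#include <sys/socket.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int open_packet_socket(void)
{
	return socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}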
3818 | ||
3819 | static struct notifier_block packet_netdev_notifier = { | |
40d4e3df | 3820 | .notifier_call = packet_notifier, |
1da177e4 LT |
3821 | }; |
3822 | ||
3823 | #ifdef CONFIG_PROC_FS | |
1da177e4 LT |
3824 | |
3825 | static void *packet_seq_start(struct seq_file *seq, loff_t *pos) | |
808f5114 | 3826 | __acquires(RCU) |
1da177e4 | 3827 | { |
e372c414 | 3828 | struct net *net = seq_file_net(seq); |
808f5114 | 3829 | |
3830 | rcu_read_lock(); | |
3831 | return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); | |
1da177e4 LT |
3832 | } |
3833 | ||
3834 | static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
3835 | { | |
1bf40954 | 3836 | struct net *net = seq_file_net(seq); |
808f5114 | 3837 | return seq_hlist_next_rcu(v, &net->packet.sklist, pos); |
1da177e4 LT |
3838 | } |
3839 | ||
3840 | static void packet_seq_stop(struct seq_file *seq, void *v) | |
808f5114 | 3841 | __releases(RCU) |
1da177e4 | 3842 | { |
808f5114 | 3843 | rcu_read_unlock(); |
1da177e4 LT |
3844 | } |
3845 | ||
1ce4f28b | 3846 | static int packet_seq_show(struct seq_file *seq, void *v) |
1da177e4 LT |
3847 | { |
3848 | if (v == SEQ_START_TOKEN) | |
3849 | seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); | |
3850 | else { | |
b7ceabd9 | 3851 | struct sock *s = sk_entry(v); |
1da177e4 LT |
3852 | const struct packet_sock *po = pkt_sk(s); |
3853 | ||
3854 | seq_printf(seq, | |
71338aa7 | 3855 | "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", |
1da177e4 LT |
3856 | s, |
3857 | atomic_read(&s->sk_refcnt), | |
3858 | s->sk_type, | |
3859 | ntohs(po->num), | |
3860 | po->ifindex, | |
3861 | po->running, | |
3862 | atomic_read(&s->sk_rmem_alloc), | |
a7cb5a49 | 3863 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), |
40d4e3df | 3864 | sock_i_ino(s)); |
1da177e4 LT |
3865 | } |
3866 | ||
3867 | return 0; | |
3868 | } | |
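For reference, each /proc/net/packet row follows the seq_printf() format above; an illustrative (not captured) example including the literal header line:

sk       RefCnt Type Proto Iface R Rmem   User   Inode
ffff8800368d4000 3      3    0003 2     1 0      0      17014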
3869 | ||
56b3d975 | 3870 | static const struct seq_operations packet_seq_ops = { |
1da177e4 LT |
3871 | .start = packet_seq_start, |
3872 | .next = packet_seq_next, | |
3873 | .stop = packet_seq_stop, | |
3874 | .show = packet_seq_show, | |
3875 | }; | |
3876 | ||
3877 | static int packet_seq_open(struct inode *inode, struct file *file) | |
3878 | { | |
e372c414 DL |
3879 | return seq_open_net(inode, file, &packet_seq_ops, |
3880 | sizeof(struct seq_net_private)); | |
1da177e4 LT |
3881 | } |
3882 | ||
da7071d7 | 3883 | static const struct file_operations packet_seq_fops = { |
1da177e4 LT |
3884 | .owner = THIS_MODULE, |
3885 | .open = packet_seq_open, | |
3886 | .read = seq_read, | |
3887 | .llseek = seq_lseek, | |
e372c414 | 3888 | .release = seq_release_net, |
1da177e4 LT |
3889 | }; |
3890 | ||
3891 | #endif | |
3892 | ||
2c8c1e72 | 3893 | static int __net_init packet_net_init(struct net *net) |
d12d01d6 | 3894 | { |
0fa7fa98 | 3895 | mutex_init(&net->packet.sklist_lock); |
2aaef4e4 | 3896 | INIT_HLIST_HEAD(&net->packet.sklist); |
d12d01d6 | 3897 | |
d4beaa66 | 3898 | if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops)) |
d12d01d6 DL |
3899 | return -ENOMEM; |
3900 | ||
3901 | return 0; | |
3902 | } | |
3903 | ||
2c8c1e72 | 3904 | static void __net_exit packet_net_exit(struct net *net) |
d12d01d6 | 3905 | { |
ece31ffd | 3906 | remove_proc_entry("packet", net->proc_net); |
d12d01d6 DL |
3907 | } |
3908 | ||
3909 | static struct pernet_operations packet_net_ops = { | |
3910 | .init = packet_net_init, | |
3911 | .exit = packet_net_exit, | |
3912 | }; | |
3913 | ||
3914 | ||
1da177e4 LT |
3915 | static void __exit packet_exit(void) |
3916 | { | |
1da177e4 | 3917 | unregister_netdevice_notifier(&packet_netdev_notifier); |
d12d01d6 | 3918 | unregister_pernet_subsys(&packet_net_ops); |
1da177e4 LT |
3919 | sock_unregister(PF_PACKET); |
3920 | proto_unregister(&packet_proto); | |
3921 | } | |
3922 | ||
3923 | static int __init packet_init(void) | |
3924 | { | |
3925 | int rc = proto_register(&packet_proto, 0); | |
3926 | ||
3927 | if (rc != 0) | |
3928 | goto out; | |
3929 | ||
3930 | sock_register(&packet_family_ops); | |
d12d01d6 | 3931 | register_pernet_subsys(&packet_net_ops); |
1da177e4 | 3932 | register_netdevice_notifier(&packet_netdev_notifier); |
1da177e4 LT |
3933 | out: |
3934 | return rc; | |
3935 | } | |
3936 | ||
3937 | module_init(packet_init); | |
3938 | module_exit(packet_exit); | |
3939 | MODULE_LICENSE("GPL"); | |
3940 | MODULE_ALIAS_NETPROTO(PF_PACKET); |