/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
                 header. PPP does this, which is wrong, because it introduces
                 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

In summary:
   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
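
/* Illustrative user-space sketch (not part of this file) of the SOCK_RAW
 * vs. SOCK_DGRAM distinction described above: SOCK_RAW hands frames to the
 * reader with the link-layer header pushed back, while SOCK_DGRAM strips it
 * and reports link-layer info via sockaddr_ll instead. Error handling is
 * omitted.
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *
 *	// buf[0] starts at the Ethernet header:
 *	recvfrom(raw, buf, sizeof(buf), 0, NULL, NULL);
 *
 *	// buf[0] starts at the network header; ll info is in the address:
 *	struct sockaddr_ll sll;
 *	socklen_t sll_len = sizeof(sll);
 *	recvfrom(dgm, buf, sizeof(buf), 0, (struct sockaddr *)&sll, &sll_len);
 */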

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
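
/* Worked example of the macros above (the descriptor size is only an
 * illustrative value): assuming sizeof(struct tpacket_block_desc) == 48,
 * BLK_HDR_LEN = ALIGN(48, 8) = 48, and BLK_PLUS_PRIV(13) =
 * 48 + ALIGN(13, 8) = 48 + 16 = 64, so the first packet of a block always
 * lands on a V3_ALIGNMENT boundary past the per-block private area.
 */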

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

#define PGV_FROM_VMALLOC 1
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	struct tpacket_kbdq_core	prb_bdqc;
	atomic_t		pending;
};

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	union  tpacket_stats_u	stats_u;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
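
/* The tp_status word is the kernel/user handshake for each V1/V2 frame.
 * A minimal user-space consumer loop matching the accessors above (an
 * illustrative sketch, not part of this file; 'ring' is the mmap()ed
 * buffer, and 'frame_nr'/'framesiz' come from the tpacket_req used at
 * setup time):
 *
 *	for (unsigned int i = 0; ; i = (i + 1) % frame_nr) {
 *		struct tpacket_hdr *hdr = (void *)(ring + i * framesiz);
 *		while (!(hdr->tp_status & TP_STATUS_USER))
 *			poll(&pfd, 1, -1);		// wait for the kernel
 *		// ... consume the frame at (char *)hdr + hdr->tp_mac ...
 *		hdr->tp_status = TP_STATUS_KERNEL;	// hand it back
 *	}
 */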

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static inline void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}
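
/* Example of the lookup arithmetic above: with frames_per_block = 4 and
 * frame_size = 2048, position 9 resolves to pg_vec[9 / 4 = 2] at byte
 * offset (9 % 4) * 2048 = 2048 into that block's buffer. The numbers are
 * only illustrative; real values come from the user's tpacket_req.
 */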

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		switch (ecmd.speed) {
		case SPEED_10000:
			msec = 1;
			div = 10000/1000;
			break;
		case SPEED_1000:
			msec = 1;
			div = 1000/1000;
			break;
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		case SPEED_100:
		case SPEED_10:
		default:
			return DEFAULT_PRB_RETIRE_TOV;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
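
/* Worked example of the calculation above: a 1MB block is 8 Mbit, so on a
 * SPEED_1000 link (div = 1, msec = 1) mbits = 8 and the function returns
 * 8 + 1 = 9ms, roughly the time it takes to fill one block. On SPEED_10000
 * (div = 10), 8 / 10 truncates to 0, so it returns 1ms.
 */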

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= (char *)pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}
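
/* For reference, the user-space setup whose parameters land in
 * init_prb_bdqc() above looks roughly like this (an illustrative sketch
 * with arbitrary sizes, not part of this file):
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 20,	// 1MB per block
 *		.tp_block_nr	   = 8,
 *		.tp_frame_size	   = 2048,
 *		.tp_frame_nr	   = 8 * ((1 << 20) / 2048),
 *		.tp_retire_blk_tov = 0,	// 0 => prb_calc_retire_blk_tmo()
 *		.tp_sizeof_priv	   = 0,
 *	};
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */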

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 * timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static inline void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static inline void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {

		/* We could have just memset this but we will lose the
		 * flexibility of making the priv area sticky
		 */
		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
		BLOCK_NUM_PKTS(pbd1) = 0;
		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		getnstimeofday(&ts);
		h1->ts_first_pkt.ts_sec = ts.tv_sec;
		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
		pkc1->pkblk_start = (char *)pbd1;
		pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
			BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
		pbd1->version = pkc1->version;
		pkc1->prev = pkc1->nxt_offset;
		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
		prb_thaw_queue(pkc1);
		_prb_refresh_rx_retire_blk_timer(pkc1);

		smp_wmb();

		return;
	}

	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
	dump_stack();
	BUG();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static inline void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}

	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
	dump_stack();
	BUG();
}

static inline int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static inline int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static inline void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static inline void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static inline void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static inline void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static inline void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len
					    )
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *) ((char *)pbd + pkc->kblk_size);

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static inline void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return 0;
	}
}

static inline void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int previous,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static inline int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static inline void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static inline void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static inline void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static inline void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	u32 idx, hash = skb->rxhash;

	idx = ((u64)hash * num) >> 32;

	return f->arr[idx];
}
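
/* The multiply-shift above maps the 32-bit rxhash uniformly onto [0, num)
 * without a modulo: e.g. with num = 4, hash = 0x80000000 gives
 * ((u64)0x80000000 * 4) >> 32 = 2.
 */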

static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return f->arr[cur];
}

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}
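
/* User-space counterpart of fanout_add(), for reference (an illustrative
 * sketch, not part of this file): each member socket joins the same group
 * id, with the demux mode packed into the upper 16 bits of the option
 * value:
 *
 *	int fanout_arg = (fanout_id & 0xffff) |
 *			 (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 */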

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	po->fanout = NULL;

	mutex_lock(&fanout_mutex);
	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
62ab0812 ED |
1561 | static inline unsigned int run_filter(const struct sk_buff *skb, |
1562 | const struct sock *sk, | |
dbcb5855 | 1563 | unsigned int res) |
1da177e4 LT |
1564 | { |
1565 | struct sk_filter *filter; | |
fda9ef5d | 1566 | |
80f8f102 ED |
1567 | rcu_read_lock(); |
1568 | filter = rcu_dereference(sk->sk_filter); | |
dbcb5855 | 1569 | if (filter != NULL) |
0a14842f | 1570 | res = SK_RUN_FILTER(filter, skb); |
80f8f102 | 1571 | rcu_read_unlock(); |
1da177e4 | 1572 | |
dbcb5855 | 1573 | return res; |
1da177e4 LT |
1574 | } |
1575 | ||
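
run_filter() executes whatever classic BPF program user space attached with SO_ATTACH_FILTER; a returned length of 0 drops the packet, anything else caps the snapshot length. A hedged sketch of the attaching side, assuming an Ethernet device (EtherType at offset 12) and an already-open packet socket fd:

#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

static int attach_ipv4_filter(int fd)
{
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),		/* load EtherType */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),		/* accept, snap up to 0xffff */
		BPF_STMT(BPF_RET | BPF_K, 0),			/* drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
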
1576 | /* | |
62ab0812 ED |
1577 | * This function performs lazy skb cloning in the hope that most |
1578 | * packets are discarded by BPF. |
1579 | * |
1580 | * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len |
1581 | * and skb->cb are mangled. It works because (and until) packets |
1582 | * falling here are owned by the current CPU. Output packets are cloned |
1583 | * by dev_queue_xmit_nit(), input packets are processed by net_bh |
1584 | * sequentially, so if we return the skb to its original state on exit, |
1585 | * we will not harm anyone. |
1da177e4 LT |
1586 | */ |
1587 | ||
40d4e3df ED |
1588 | static int packet_rcv(struct sk_buff *skb, struct net_device *dev, |
1589 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1590 | { |
1591 | struct sock *sk; | |
1592 | struct sockaddr_ll *sll; | |
1593 | struct packet_sock *po; | |
40d4e3df | 1594 | u8 *skb_head = skb->data; |
1da177e4 | 1595 | int skb_len = skb->len; |
dbcb5855 | 1596 | unsigned int snaplen, res; |
1da177e4 LT |
1597 | |
1598 | if (skb->pkt_type == PACKET_LOOPBACK) | |
1599 | goto drop; | |
1600 | ||
1601 | sk = pt->af_packet_priv; | |
1602 | po = pkt_sk(sk); | |
1603 | ||
09ad9bc7 | 1604 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1605 | goto drop; |
1606 | ||
1da177e4 LT |
1607 | skb->dev = dev; |
1608 | ||
3b04ddde | 1609 | if (dev->header_ops) { |
1da177e4 | 1610 | /* The device has an explicit notion of ll header, |
62ab0812 ED |
1611 | * exported to higher levels. |
1612 | * | |
1613 | * Otherwise, the device hides details of its frame | |
1614 | * structure, so that the corresponding packet head is |
1615 | * never delivered to the user. |
1da177e4 LT |
1616 | */ |
1617 | if (sk->sk_type != SOCK_DGRAM) | |
98e399f8 | 1618 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1619 | else if (skb->pkt_type == PACKET_OUTGOING) { |
1620 | /* Special case: outgoing packets have ll header at head */ | |
bbe735e4 | 1621 | skb_pull(skb, skb_network_offset(skb)); |
1da177e4 LT |
1622 | } |
1623 | } | |
1624 | ||
1625 | snaplen = skb->len; | |
1626 | ||
dbcb5855 DM |
1627 | res = run_filter(skb, sk, snaplen); |
1628 | if (!res) | |
fda9ef5d | 1629 | goto drop_n_restore; |
dbcb5855 DM |
1630 | if (snaplen > res) |
1631 | snaplen = res; | |
1da177e4 LT |
1632 | |
1633 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | |
1634 | (unsigned)sk->sk_rcvbuf) | |
1635 | goto drop_n_acct; | |
1636 | ||
1637 | if (skb_shared(skb)) { | |
1638 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | |
1639 | if (nskb == NULL) | |
1640 | goto drop_n_acct; | |
1641 | ||
1642 | if (skb_head != skb->data) { | |
1643 | skb->data = skb_head; | |
1644 | skb->len = skb_len; | |
1645 | } | |
1646 | kfree_skb(skb); | |
1647 | skb = nskb; | |
1648 | } | |
1649 | ||
ffbc6111 HX |
1650 | BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 > |
1651 | sizeof(skb->cb)); | |
1652 | ||
1653 | sll = &PACKET_SKB_CB(skb)->sa.ll; | |
1da177e4 LT |
1654 | sll->sll_family = AF_PACKET; |
1655 | sll->sll_hatype = dev->type; | |
1656 | sll->sll_protocol = skb->protocol; | |
1657 | sll->sll_pkttype = skb->pkt_type; | |
8032b464 | 1658 | if (unlikely(po->origdev)) |
80feaacb PWJ |
1659 | sll->sll_ifindex = orig_dev->ifindex; |
1660 | else | |
1661 | sll->sll_ifindex = dev->ifindex; | |
1da177e4 | 1662 | |
b95cce35 | 1663 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
1da177e4 | 1664 | |
ffbc6111 | 1665 | PACKET_SKB_CB(skb)->origlen = skb->len; |
8dc41944 | 1666 | |
1da177e4 LT |
1667 | if (pskb_trim(skb, snaplen)) |
1668 | goto drop_n_acct; | |
1669 | ||
1670 | skb_set_owner_r(skb, sk); | |
1671 | skb->dev = NULL; | |
adf30907 | 1672 | skb_dst_drop(skb); |
1da177e4 | 1673 | |
84531c24 PO |
1674 | /* drop conntrack reference */ |
1675 | nf_reset(skb); | |
1676 | ||
1da177e4 LT |
1677 | spin_lock(&sk->sk_receive_queue.lock); |
1678 | po->stats.tp_packets++; | |
3b885787 | 1679 | skb->dropcount = atomic_read(&sk->sk_drops); |
1da177e4 LT |
1680 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
1681 | spin_unlock(&sk->sk_receive_queue.lock); | |
1682 | sk->sk_data_ready(sk, skb->len); | |
1683 | return 0; | |
1684 | ||
1685 | drop_n_acct: | |
7091fbd8 WB |
1686 | spin_lock(&sk->sk_receive_queue.lock); |
1687 | po->stats.tp_drops++; | |
1688 | atomic_inc(&sk->sk_drops); | |
1689 | spin_unlock(&sk->sk_receive_queue.lock); | |
1da177e4 LT |
1690 | |
1691 | drop_n_restore: | |
1692 | if (skb_head != skb->data && skb_shared(skb)) { | |
1693 | skb->data = skb_head; | |
1694 | skb->len = skb_len; | |
1695 | } | |
1696 | drop: | |
ead2ceb0 | 1697 | consume_skb(skb); |
1da177e4 LT |
1698 | return 0; |
1699 | } | |
1700 | ||
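
The sockaddr_ll that packet_rcv() stores in the skb's control block is what user space receives as the source address; a sketch, assuming fd is a bound AF_PACKET/SOCK_RAW socket:

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>

static void dump_one(int fd)
{
	unsigned char buf[2048];
	struct sockaddr_ll sll;
	socklen_t slen = sizeof(sll);
	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
			     (struct sockaddr *)&sll, &slen);

	if (n < 0)
		return;
	printf("ifindex=%d hatype=%u pkttype=%u proto=0x%04x len=%zd\n",
	       sll.sll_ifindex, sll.sll_hatype, sll.sll_pkttype,
	       ntohs(sll.sll_protocol), n);
}
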
40d4e3df ED |
1701 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
1702 | struct packet_type *pt, struct net_device *orig_dev) | |
1da177e4 LT |
1703 | { |
1704 | struct sock *sk; | |
1705 | struct packet_sock *po; | |
1706 | struct sockaddr_ll *sll; | |
bbd6ef87 PM |
1707 | union { |
1708 | struct tpacket_hdr *h1; | |
1709 | struct tpacket2_hdr *h2; | |
f6fb8f10 | 1710 | struct tpacket3_hdr *h3; |
bbd6ef87 PM |
1711 | void *raw; |
1712 | } h; | |
40d4e3df | 1713 | u8 *skb_head = skb->data; |
1da177e4 | 1714 | int skb_len = skb->len; |
dbcb5855 | 1715 | unsigned int snaplen, res; |
f6fb8f10 | 1716 | unsigned long status = TP_STATUS_USER; |
bbd6ef87 | 1717 | unsigned short macoff, netoff, hdrlen; |
1da177e4 | 1718 | struct sk_buff *copy_skb = NULL; |
b7aa0bf7 | 1719 | struct timeval tv; |
bbd6ef87 | 1720 | struct timespec ts; |
614f60fa | 1721 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); |
1da177e4 LT |
1722 | |
1723 | if (skb->pkt_type == PACKET_LOOPBACK) | |
1724 | goto drop; | |
1725 | ||
1726 | sk = pt->af_packet_priv; | |
1727 | po = pkt_sk(sk); | |
1728 | ||
09ad9bc7 | 1729 | if (!net_eq(dev_net(dev), sock_net(sk))) |
d12d01d6 DL |
1730 | goto drop; |
1731 | ||
3b04ddde | 1732 | if (dev->header_ops) { |
1da177e4 | 1733 | if (sk->sk_type != SOCK_DGRAM) |
98e399f8 | 1734 | skb_push(skb, skb->data - skb_mac_header(skb)); |
1da177e4 LT |
1735 | else if (skb->pkt_type == PACKET_OUTGOING) { |
1736 | /* Special case: outgoing packets have ll header at head */ | |
bbe735e4 | 1737 | skb_pull(skb, skb_network_offset(skb)); |
1da177e4 LT |
1738 | } |
1739 | } | |
1740 | ||
8dc41944 HX |
1741 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
1742 | status |= TP_STATUS_CSUMNOTREADY; | |
1743 | ||
1da177e4 LT |
1744 | snaplen = skb->len; |
1745 | ||
dbcb5855 DM |
1746 | res = run_filter(skb, sk, snaplen); |
1747 | if (!res) | |
fda9ef5d | 1748 | goto drop_n_restore; |
dbcb5855 DM |
1749 | if (snaplen > res) |
1750 | snaplen = res; | |
1da177e4 LT |
1751 | |
1752 | if (sk->sk_type == SOCK_DGRAM) { | |
8913336a PM |
1753 | macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + |
1754 | po->tp_reserve; | |
1da177e4 | 1755 | } else { |
bbe735e4 | 1756 | unsigned maclen = skb_network_offset(skb); |
bbd6ef87 | 1757 | netoff = TPACKET_ALIGN(po->tp_hdrlen + |
8913336a PM |
1758 | (maclen < 16 ? 16 : maclen)) + |
1759 | po->tp_reserve; | |
1da177e4 LT |
1760 | macoff = netoff - maclen; |
1761 | } | |
f6fb8f10 | 1762 | if (po->tp_version <= TPACKET_V2) { |
1763 | if (macoff + snaplen > po->rx_ring.frame_size) { | |
1764 | if (po->copy_thresh && | |
1765 | atomic_read(&sk->sk_rmem_alloc) + skb->truesize | |
1766 | < (unsigned)sk->sk_rcvbuf) { | |
1767 | if (skb_shared(skb)) { | |
1768 | copy_skb = skb_clone(skb, GFP_ATOMIC); | |
1769 | } else { | |
1770 | copy_skb = skb_get(skb); | |
1771 | skb_head = skb->data; | |
1772 | } | |
1773 | if (copy_skb) | |
1774 | skb_set_owner_r(copy_skb, sk); | |
1da177e4 | 1775 | } |
f6fb8f10 | 1776 | snaplen = po->rx_ring.frame_size - macoff; |
1777 | if ((int)snaplen < 0) | |
1778 | snaplen = 0; | |
1da177e4 | 1779 | } |
1da177e4 | 1780 | } |
1da177e4 | 1781 | spin_lock(&sk->sk_receive_queue.lock); |
f6fb8f10 | 1782 | h.raw = packet_current_rx_frame(po, skb, |
1783 | TP_STATUS_KERNEL, (macoff+snaplen)); | |
bbd6ef87 | 1784 | if (!h.raw) |
1da177e4 | 1785 | goto ring_is_full; |
f6fb8f10 | 1786 | if (po->tp_version <= TPACKET_V2) { |
1787 | packet_increment_rx_head(po, &po->rx_ring); | |
1788 | /* | |
1789 | * LOSING will be reported until you read the stats, |
1790 | * because the counter is COR - Clear On Read. |
1791 | * Anyway, this is done for V1/V2 only, as V3 doesn't need it |
1792 | * at the packet level. |
1793 | */ | |
1794 | if (po->stats.tp_drops) | |
1795 | status |= TP_STATUS_LOSING; | |
1796 | } | |
1da177e4 LT |
1797 | po->stats.tp_packets++; |
1798 | if (copy_skb) { | |
1799 | status |= TP_STATUS_COPY; | |
1800 | __skb_queue_tail(&sk->sk_receive_queue, copy_skb); | |
1801 | } | |
1da177e4 LT |
1802 | spin_unlock(&sk->sk_receive_queue.lock); |
1803 | ||
bbd6ef87 | 1804 | skb_copy_bits(skb, 0, h.raw + macoff, snaplen); |
1da177e4 | 1805 | |
bbd6ef87 PM |
1806 | switch (po->tp_version) { |
1807 | case TPACKET_V1: | |
1808 | h.h1->tp_len = skb->len; | |
1809 | h.h1->tp_snaplen = snaplen; | |
1810 | h.h1->tp_mac = macoff; | |
1811 | h.h1->tp_net = netoff; | |
614f60fa SM |
1812 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) |
1813 | && shhwtstamps->syststamp.tv64) | |
1814 | tv = ktime_to_timeval(shhwtstamps->syststamp); | |
1815 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | |
1816 | && shhwtstamps->hwtstamp.tv64) | |
1817 | tv = ktime_to_timeval(shhwtstamps->hwtstamp); | |
1818 | else if (skb->tstamp.tv64) | |
bbd6ef87 PM |
1819 | tv = ktime_to_timeval(skb->tstamp); |
1820 | else | |
1821 | do_gettimeofday(&tv); | |
1822 | h.h1->tp_sec = tv.tv_sec; | |
1823 | h.h1->tp_usec = tv.tv_usec; | |
1824 | hdrlen = sizeof(*h.h1); | |
1825 | break; | |
1826 | case TPACKET_V2: | |
1827 | h.h2->tp_len = skb->len; | |
1828 | h.h2->tp_snaplen = snaplen; | |
1829 | h.h2->tp_mac = macoff; | |
1830 | h.h2->tp_net = netoff; | |
614f60fa SM |
1831 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) |
1832 | && shhwtstamps->syststamp.tv64) | |
1833 | ts = ktime_to_timespec(shhwtstamps->syststamp); | |
1834 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | |
1835 | && shhwtstamps->hwtstamp.tv64) | |
1836 | ts = ktime_to_timespec(shhwtstamps->hwtstamp); | |
1837 | else if (skb->tstamp.tv64) | |
bbd6ef87 PM |
1838 | ts = ktime_to_timespec(skb->tstamp); |
1839 | else | |
1840 | getnstimeofday(&ts); | |
1841 | h.h2->tp_sec = ts.tv_sec; | |
1842 | h.h2->tp_nsec = ts.tv_nsec; | |
a3bcc23e BG |
1843 | if (vlan_tx_tag_present(skb)) { |
1844 | h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); | |
1845 | status |= TP_STATUS_VLAN_VALID; | |
1846 | } else { | |
1847 | h.h2->tp_vlan_tci = 0; | |
1848 | } | |
13fcb7bd | 1849 | h.h2->tp_padding = 0; |
bbd6ef87 PM |
1850 | hdrlen = sizeof(*h.h2); |
1851 | break; | |
f6fb8f10 | 1852 | case TPACKET_V3: |
1853 | /* tp_next_offset and vlan are already populated above, |
1854 | * so don't clear those fields here. |
1855 | */ | |
1856 | h.h3->tp_status |= status; | |
1857 | h.h3->tp_len = skb->len; | |
1858 | h.h3->tp_snaplen = snaplen; | |
1859 | h.h3->tp_mac = macoff; | |
1860 | h.h3->tp_net = netoff; | |
1861 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) | |
1862 | && shhwtstamps->syststamp.tv64) | |
1863 | ts = ktime_to_timespec(shhwtstamps->syststamp); | |
1864 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | |
1865 | && shhwtstamps->hwtstamp.tv64) | |
1866 | ts = ktime_to_timespec(shhwtstamps->hwtstamp); | |
1867 | else if (skb->tstamp.tv64) | |
1868 | ts = ktime_to_timespec(skb->tstamp); | |
1869 | else | |
1870 | getnstimeofday(&ts); | |
1871 | h.h3->tp_sec = ts.tv_sec; | |
1872 | h.h3->tp_nsec = ts.tv_nsec; | |
1873 | hdrlen = sizeof(*h.h3); | |
1874 | break; | |
bbd6ef87 PM |
1875 | default: |
1876 | BUG(); | |
1877 | } | |
1da177e4 | 1878 | |
bbd6ef87 | 1879 | sll = h.raw + TPACKET_ALIGN(hdrlen); |
b95cce35 | 1880 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
1da177e4 LT |
1881 | sll->sll_family = AF_PACKET; |
1882 | sll->sll_hatype = dev->type; | |
1883 | sll->sll_protocol = skb->protocol; | |
1884 | sll->sll_pkttype = skb->pkt_type; | |
8032b464 | 1885 | if (unlikely(po->origdev)) |
80feaacb PWJ |
1886 | sll->sll_ifindex = orig_dev->ifindex; |
1887 | else | |
1888 | sll->sll_ifindex = dev->ifindex; | |
1da177e4 | 1889 | |
e16aa207 | 1890 | smp_mb(); |
f6dafa95 | 1891 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 |
1da177e4 | 1892 | { |
0af55bb5 CG |
1893 | u8 *start, *end; |
1894 | ||
f6fb8f10 | 1895 | if (po->tp_version <= TPACKET_V2) { |
1896 | end = (u8 *)PAGE_ALIGN((unsigned long)h.raw | |
1897 | + macoff + snaplen); | |
1898 | for (start = h.raw; start < end; start += PAGE_SIZE) | |
1899 | flush_dcache_page(pgv_to_page(start)); | |
1900 | } | |
cc9f01b2 | 1901 | smp_wmb(); |
1da177e4 | 1902 | } |
f6dafa95 | 1903 | #endif |
f6fb8f10 | 1904 | if (po->tp_version <= TPACKET_V2) |
1905 | __packet_set_status(po, h.raw, status); | |
1906 | else | |
1907 | prb_clear_blk_fill_status(&po->rx_ring); | |
1da177e4 LT |
1908 | |
1909 | sk->sk_data_ready(sk, 0); | |
1910 | ||
1911 | drop_n_restore: | |
1912 | if (skb_head != skb->data && skb_shared(skb)) { | |
1913 | skb->data = skb_head; | |
1914 | skb->len = skb_len; | |
1915 | } | |
1916 | drop: | |
1ce4f28b | 1917 | kfree_skb(skb); |
1da177e4 LT |
1918 | return 0; |
1919 | ||
1920 | ring_is_full: | |
1921 | po->stats.tp_drops++; | |
1922 | spin_unlock(&sk->sk_receive_queue.lock); | |
1923 | ||
1924 | sk->sk_data_ready(sk, 0); | |
acb5d75b | 1925 | kfree_skb(copy_skb); |
1da177e4 LT |
1926 | goto drop_n_restore; |
1927 | } | |
1928 | ||
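
A hedged sketch of the user-space counterpart of tpacket_rcv() for TPACKET_V2: walk the mmap'ed ring, consume frames flagged TP_STATUS_USER, and hand each slot back by rewriting tp_status. Here ring, frame_size and frame_nr come from the PACKET_RX_RING setup (not shown) and process_frame() is a hypothetical callback; production code would also poll() between passes.

#include <linux/if_packet.h>

static unsigned int ring_head;

static void drain_ring_v2(char *ring, unsigned int frame_size,
			  unsigned int frame_nr,
			  void (*process_frame)(const void *, unsigned int))
{
	for (;;) {
		struct tpacket2_hdr *hdr =
			(struct tpacket2_hdr *)(ring + ring_head * frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER))
			break;			/* ring drained; wait in poll() */
		__sync_synchronize();		/* pairs with the kernel's smp_mb() above */
		/* frame data starts tp_mac bytes into the slot */
		process_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
		hdr->tp_status = TP_STATUS_KERNEL;	/* give the slot back */
		ring_head = (ring_head + 1) % frame_nr;
	}
}
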
69e3c75f JB |
1929 | static void tpacket_destruct_skb(struct sk_buff *skb) |
1930 | { | |
1931 | struct packet_sock *po = pkt_sk(skb->sk); | |
40d4e3df | 1932 | void *ph; |
1da177e4 | 1933 | |
69e3c75f JB |
1934 | if (likely(po->tx_ring.pg_vec)) { |
1935 | ph = skb_shinfo(skb)->destructor_arg; | |
1936 | BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING); | |
1937 | BUG_ON(atomic_read(&po->tx_ring.pending) == 0); | |
1938 | atomic_dec(&po->tx_ring.pending); | |
1939 | __packet_set_status(po, ph, TP_STATUS_AVAILABLE); | |
1940 | } | |
1941 | ||
1942 | sock_wfree(skb); | |
1943 | } | |
1944 | ||
40d4e3df ED |
1945 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, |
1946 | void *frame, struct net_device *dev, int size_max, | |
1947 | __be16 proto, unsigned char *addr) | |
69e3c75f JB |
1948 | { |
1949 | union { | |
1950 | struct tpacket_hdr *h1; | |
1951 | struct tpacket2_hdr *h2; | |
1952 | void *raw; | |
1953 | } ph; | |
1954 | int to_write, offset, len, tp_len, nr_frags, len_max; | |
1955 | struct socket *sock = po->sk.sk_socket; | |
1956 | struct page *page; | |
1957 | void *data; | |
1958 | int err; | |
1959 | ||
1960 | ph.raw = frame; | |
1961 | ||
1962 | skb->protocol = proto; | |
1963 | skb->dev = dev; | |
1964 | skb->priority = po->sk.sk_priority; | |
2d37a186 | 1965 | skb->mark = po->sk.sk_mark; |
69e3c75f JB |
1966 | skb_shinfo(skb)->destructor_arg = ph.raw; |
1967 | ||
1968 | switch (po->tp_version) { | |
1969 | case TPACKET_V2: | |
1970 | tp_len = ph.h2->tp_len; | |
1971 | break; | |
1972 | default: | |
1973 | tp_len = ph.h1->tp_len; | |
1974 | break; | |
1975 | } | |
1976 | if (unlikely(tp_len > size_max)) { | |
40d4e3df | 1977 | pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); |
69e3c75f JB |
1978 | return -EMSGSIZE; |
1979 | } | |
1980 | ||
1981 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | |
1982 | skb_reset_network_header(skb); | |
1983 | ||
1984 | data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); | |
1985 | to_write = tp_len; | |
1986 | ||
1987 | if (sock->type == SOCK_DGRAM) { | |
1988 | err = dev_hard_header(skb, dev, ntohs(proto), addr, | |
1989 | NULL, tp_len); | |
1990 | if (unlikely(err < 0)) | |
1991 | return -EINVAL; | |
40d4e3df | 1992 | } else if (dev->hard_header_len) { |
69e3c75f JB |
1993 | /* net device doesn't like empty head */ |
1994 | if (unlikely(tp_len <= dev->hard_header_len)) { | |
40d4e3df ED |
1995 | pr_err("packet size is too short (%d < %d)\n", |
1996 | tp_len, dev->hard_header_len); | |
69e3c75f JB |
1997 | return -EINVAL; |
1998 | } | |
1999 | ||
2000 | skb_push(skb, dev->hard_header_len); | |
2001 | err = skb_store_bits(skb, 0, data, | |
2002 | dev->hard_header_len); | |
2003 | if (unlikely(err)) | |
2004 | return err; | |
2005 | ||
2006 | data += dev->hard_header_len; | |
2007 | to_write -= dev->hard_header_len; | |
2008 | } | |
2009 | ||
2010 | err = -EFAULT; | |
69e3c75f JB |
2011 | offset = offset_in_page(data); |
2012 | len_max = PAGE_SIZE - offset; | |
2013 | len = ((to_write > len_max) ? len_max : to_write); | |
2014 | ||
2015 | skb->data_len = to_write; | |
2016 | skb->len += to_write; | |
2017 | skb->truesize += to_write; | |
2018 | atomic_add(to_write, &po->sk.sk_wmem_alloc); | |
2019 | ||
2020 | while (likely(to_write)) { | |
2021 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2022 | ||
2023 | if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { | |
40d4e3df ED |
2024 | pr_err("Packet exceed the number of skb frags(%lu)\n", |
2025 | MAX_SKB_FRAGS); | |
69e3c75f JB |
2026 | return -EFAULT; |
2027 | } | |
2028 | ||
0af55bb5 CG |
2029 | page = pgv_to_page(data); |
2030 | data += len; | |
69e3c75f JB |
2031 | flush_dcache_page(page); |
2032 | get_page(page); | |
0af55bb5 | 2033 | skb_fill_page_desc(skb, nr_frags, page, offset, len); |
69e3c75f JB |
2034 | to_write -= len; |
2035 | offset = 0; | |
2036 | len_max = PAGE_SIZE; | |
2037 | len = ((to_write > len_max) ? len_max : to_write); | |
2038 | } | |
2039 | ||
2040 | return tp_len; | |
2041 | } | |
2042 | ||
2043 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |
2044 | { | |
69e3c75f JB |
2045 | struct sk_buff *skb; |
2046 | struct net_device *dev; | |
2047 | __be16 proto; | |
827d9780 BG |
2048 | bool need_rls_dev = false; |
2049 | int err, reserve = 0; | |
40d4e3df ED |
2050 | void *ph; |
2051 | struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; | |
69e3c75f JB |
2052 | int tp_len, size_max; |
2053 | unsigned char *addr; | |
2054 | int len_sum = 0; | |
2055 | int status = 0; | |
2056 | ||
69e3c75f JB |
2057 | mutex_lock(&po->pg_vec_lock); |
2058 | ||
2059 | err = -EBUSY; | |
2060 | if (saddr == NULL) { | |
827d9780 | 2061 | dev = po->prot_hook.dev; |
69e3c75f JB |
2062 | proto = po->num; |
2063 | addr = NULL; | |
2064 | } else { | |
2065 | err = -EINVAL; | |
2066 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | |
2067 | goto out; | |
2068 | if (msg->msg_namelen < (saddr->sll_halen | |
2069 | + offsetof(struct sockaddr_ll, | |
2070 | sll_addr))) | |
2071 | goto out; | |
69e3c75f JB |
2072 | proto = saddr->sll_protocol; |
2073 | addr = saddr->sll_addr; | |
827d9780 BG |
2074 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
2075 | need_rls_dev = true; | |
69e3c75f JB |
2076 | } |
2077 | ||
69e3c75f JB |
2078 | err = -ENXIO; |
2079 | if (unlikely(dev == NULL)) | |
2080 | goto out; | |
2081 | ||
2082 | reserve = dev->hard_header_len; | |
2083 | ||
2084 | err = -ENETDOWN; | |
2085 | if (unlikely(!(dev->flags & IFF_UP))) | |
2086 | goto out_put; | |
2087 | ||
2088 | size_max = po->tx_ring.frame_size | |
b5dd884e | 2089 | - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); |
69e3c75f JB |
2090 | |
2091 | if (size_max > dev->mtu + reserve) | |
2092 | size_max = dev->mtu + reserve; | |
2093 | ||
2094 | do { | |
2095 | ph = packet_current_frame(po, &po->tx_ring, | |
2096 | TP_STATUS_SEND_REQUEST); | |
2097 | ||
2098 | if (unlikely(ph == NULL)) { | |
2099 | schedule(); | |
2100 | continue; | |
2101 | } | |
2102 | ||
2103 | status = TP_STATUS_SEND_REQUEST; | |
2104 | skb = sock_alloc_send_skb(&po->sk, | |
2105 | LL_ALLOCATED_SPACE(dev) | |
2106 | + sizeof(struct sockaddr_ll), | |
2107 | 0, &err); | |
2108 | ||
2109 | if (unlikely(skb == NULL)) | |
2110 | goto out_status; | |
2111 | ||
2112 | tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, | |
2113 | addr); | |
2114 | ||
2115 | if (unlikely(tp_len < 0)) { | |
2116 | if (po->tp_loss) { | |
2117 | __packet_set_status(po, ph, | |
2118 | TP_STATUS_AVAILABLE); | |
2119 | packet_increment_head(&po->tx_ring); | |
2120 | kfree_skb(skb); | |
2121 | continue; | |
2122 | } else { | |
2123 | status = TP_STATUS_WRONG_FORMAT; | |
2124 | err = tp_len; | |
2125 | goto out_status; | |
2126 | } | |
2127 | } | |
2128 | ||
2129 | skb->destructor = tpacket_destruct_skb; | |
2130 | __packet_set_status(po, ph, TP_STATUS_SENDING); | |
2131 | atomic_inc(&po->tx_ring.pending); | |
2132 | ||
2133 | status = TP_STATUS_SEND_REQUEST; | |
2134 | err = dev_queue_xmit(skb); | |
eb70df13 JP |
2135 | if (unlikely(err > 0)) { |
2136 | err = net_xmit_errno(err); | |
2137 | if (err && __packet_get_status(po, ph) == | |
2138 | TP_STATUS_AVAILABLE) { | |
2139 | /* skb was destructed already */ | |
2140 | skb = NULL; | |
2141 | goto out_status; | |
2142 | } | |
2143 | /* | |
2144 | * skb was dropped but not destructed yet; | |
2145 | * let's treat it like congestion or err < 0 | |
2146 | */ | |
2147 | err = 0; | |
2148 | } | |
69e3c75f JB |
2149 | packet_increment_head(&po->tx_ring); |
2150 | len_sum += tp_len; | |
f64f9e71 JP |
2151 | } while (likely((ph != NULL) || |
2152 | ((!(msg->msg_flags & MSG_DONTWAIT)) && | |
2153 | (atomic_read(&po->tx_ring.pending)))) | |
2154 | ); | |
69e3c75f JB |
2155 | |
2156 | err = len_sum; | |
2157 | goto out_put; | |
2158 | ||
69e3c75f JB |
2159 | out_status: |
2160 | __packet_set_status(po, ph, status); | |
2161 | kfree_skb(skb); | |
2162 | out_put: | |
827d9780 BG |
2163 | if (need_rls_dev) |
2164 | dev_put(dev); | |
69e3c75f JB |
2165 | out: |
2166 | mutex_unlock(&po->pg_vec_lock); | |
2167 | return err; | |
2168 | } | |
69e3c75f | 2169 | |
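
The user-space half of tpacket_snd(), sketched for TPACKET_V1 under the same assumptions as above (tx_ring, frame_size and slot come from a PACKET_TX_RING + mmap() setup that is not shown): claim an AVAILABLE slot, copy the frame, flip the status to SEND_REQUEST, then kick the kernel with an empty send().

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int tx_one(int fd, char *tx_ring, unsigned int frame_size,
		  unsigned int slot, const void *pkt, unsigned int len)
{
	struct tpacket_hdr *hdr =
		(struct tpacket_hdr *)(tx_ring + slot * frame_size);

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return -1;		/* slot still owned by the kernel */
	/* V1 frame data starts right after the aligned header */
	memcpy((char *)hdr + TPACKET_HDRLEN - sizeof(struct sockaddr_ll),
	       pkt, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;
	return send(fd, NULL, 0, 0);	/* flush all queued frames */
}
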
bfd5f4a3 SS |
2170 | static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, |
2171 | size_t reserve, size_t len, | |
2172 | size_t linear, int noblock, | |
2173 | int *err) | |
2174 | { | |
2175 | struct sk_buff *skb; | |
2176 | ||
2177 | /* Under a page? Don't bother with paged skb. */ | |
2178 | if (prepad + len < PAGE_SIZE || !linear) | |
2179 | linear = len; | |
2180 | ||
2181 | skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, | |
2182 | err); | |
2183 | if (!skb) | |
2184 | return NULL; | |
2185 | ||
2186 | skb_reserve(skb, reserve); | |
2187 | skb_put(skb, linear); | |
2188 | skb->data_len = len - linear; | |
2189 | skb->len += len - linear; | |
2190 | ||
2191 | return skb; | |
2192 | } | |
2193 | ||
69e3c75f | 2194 | static int packet_snd(struct socket *sock, |
1da177e4 LT |
2195 | struct msghdr *msg, size_t len) |
2196 | { | |
2197 | struct sock *sk = sock->sk; | |
40d4e3df | 2198 | struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; |
1da177e4 LT |
2199 | struct sk_buff *skb; |
2200 | struct net_device *dev; | |
0e11c91e | 2201 | __be16 proto; |
827d9780 | 2202 | bool need_rls_dev = false; |
1da177e4 | 2203 | unsigned char *addr; |
827d9780 | 2204 | int err, reserve = 0; |
bfd5f4a3 SS |
2205 | struct virtio_net_hdr vnet_hdr = { 0 }; |
2206 | int offset = 0; | |
2207 | int vnet_hdr_len; | |
2208 | struct packet_sock *po = pkt_sk(sk); | |
2209 | unsigned short gso_type = 0; | |
1da177e4 LT |
2210 | |
2211 | /* | |
1ce4f28b | 2212 | * Get and verify the address. |
1da177e4 | 2213 | */ |
1ce4f28b | 2214 | |
1da177e4 | 2215 | if (saddr == NULL) { |
827d9780 | 2216 | dev = po->prot_hook.dev; |
1da177e4 LT |
2217 | proto = po->num; |
2218 | addr = NULL; | |
2219 | } else { | |
2220 | err = -EINVAL; | |
2221 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | |
2222 | goto out; | |
0fb375fb EB |
2223 | if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) |
2224 | goto out; | |
1da177e4 LT |
2225 | proto = saddr->sll_protocol; |
2226 | addr = saddr->sll_addr; | |
827d9780 BG |
2227 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); |
2228 | need_rls_dev = true; | |
1da177e4 LT |
2229 | } |
2230 | ||
1da177e4 LT |
2231 | err = -ENXIO; |
2232 | if (dev == NULL) | |
2233 | goto out_unlock; | |
2234 | if (sock->type == SOCK_RAW) | |
2235 | reserve = dev->hard_header_len; | |
2236 | ||
d5e76b0a DM |
2237 | err = -ENETDOWN; |
2238 | if (!(dev->flags & IFF_UP)) | |
2239 | goto out_unlock; | |
2240 | ||
bfd5f4a3 SS |
2241 | if (po->has_vnet_hdr) { |
2242 | vnet_hdr_len = sizeof(vnet_hdr); | |
2243 | ||
2244 | err = -EINVAL; | |
2245 | if (len < vnet_hdr_len) | |
2246 | goto out_unlock; | |
2247 | ||
2248 | len -= vnet_hdr_len; | |
2249 | ||
2250 | err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov, | |
2251 | vnet_hdr_len); | |
2252 | if (err < 0) | |
2253 | goto out_unlock; | |
2254 | ||
2255 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | |
2256 | (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 > | |
2257 | vnet_hdr.hdr_len)) | |
2258 | vnet_hdr.hdr_len = vnet_hdr.csum_start + | |
2259 | vnet_hdr.csum_offset + 2; | |
2260 | ||
2261 | err = -EINVAL; | |
2262 | if (vnet_hdr.hdr_len > len) | |
2263 | goto out_unlock; | |
2264 | ||
2265 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | |
2266 | switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { | |
2267 | case VIRTIO_NET_HDR_GSO_TCPV4: | |
2268 | gso_type = SKB_GSO_TCPV4; | |
2269 | break; | |
2270 | case VIRTIO_NET_HDR_GSO_TCPV6: | |
2271 | gso_type = SKB_GSO_TCPV6; | |
2272 | break; | |
2273 | case VIRTIO_NET_HDR_GSO_UDP: | |
2274 | gso_type = SKB_GSO_UDP; | |
2275 | break; | |
2276 | default: | |
2277 | goto out_unlock; | |
2278 | } | |
2279 | ||
2280 | if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) | |
2281 | gso_type |= SKB_GSO_TCP_ECN; | |
2282 | ||
2283 | if (vnet_hdr.gso_size == 0) | |
2284 | goto out_unlock; | |
2285 | ||
2286 | } | |
2287 | } | |
2288 | ||
1da177e4 | 2289 | err = -EMSGSIZE; |
57f89bfa | 2290 | if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN)) |
1da177e4 LT |
2291 | goto out_unlock; |
2292 | ||
bfd5f4a3 SS |
2293 | err = -ENOBUFS; |
2294 | skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev), | |
2295 | LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len, | |
2296 | msg->msg_flags & MSG_DONTWAIT, &err); | |
40d4e3df | 2297 | if (skb == NULL) |
1da177e4 LT |
2298 | goto out_unlock; |
2299 | ||
bfd5f4a3 | 2300 | skb_set_network_header(skb, reserve); |
1da177e4 | 2301 | |
0c4e8581 SH |
2302 | err = -EINVAL; |
2303 | if (sock->type == SOCK_DGRAM && | |
bfd5f4a3 | 2304 | (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0) |
0c4e8581 | 2305 | goto out_free; |
1da177e4 LT |
2306 | |
2307 | /* Returns -EFAULT on error */ | |
bfd5f4a3 | 2308 | err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); |
1da177e4 LT |
2309 | if (err) |
2310 | goto out_free; | |
2244d07b | 2311 | err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); |
ed85b565 RC |
2312 | if (err < 0) |
2313 | goto out_free; | |
1da177e4 | 2314 | |
57f89bfa BG |
2315 | if (!gso_type && (len > dev->mtu + reserve)) { |
2316 | /* Earlier code assumed this would be a VLAN pkt, | |
2317 | * double-check this now that we have the actual | |
2318 | * packet in hand. | |
2319 | */ | |
2320 | struct ethhdr *ehdr; | |
2321 | skb_reset_mac_header(skb); | |
2322 | ehdr = eth_hdr(skb); | |
2323 | if (ehdr->h_proto != htons(ETH_P_8021Q)) { | |
2324 | err = -EMSGSIZE; | |
2325 | goto out_free; | |
2326 | } | |
2327 | } | |
2328 | ||
1da177e4 LT |
2329 | skb->protocol = proto; |
2330 | skb->dev = dev; | |
2331 | skb->priority = sk->sk_priority; | |
2d37a186 | 2332 | skb->mark = sk->sk_mark; |
1da177e4 | 2333 | |
bfd5f4a3 SS |
2334 | if (po->has_vnet_hdr) { |
2335 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | |
2336 | if (!skb_partial_csum_set(skb, vnet_hdr.csum_start, | |
2337 | vnet_hdr.csum_offset)) { | |
2338 | err = -EINVAL; | |
2339 | goto out_free; | |
2340 | } | |
2341 | } | |
2342 | ||
2343 | skb_shinfo(skb)->gso_size = vnet_hdr.gso_size; | |
2344 | skb_shinfo(skb)->gso_type = gso_type; | |
2345 | ||
2346 | /* Header must be checked, and gso_segs computed. */ | |
2347 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | |
2348 | skb_shinfo(skb)->gso_segs = 0; | |
2349 | ||
2350 | len += vnet_hdr_len; | |
2351 | } | |
2352 | ||
1da177e4 LT |
2353 | /* |
2354 | * Now send it | |
2355 | */ | |
2356 | ||
2357 | err = dev_queue_xmit(skb); | |
2358 | if (err > 0 && (err = net_xmit_errno(err)) != 0) | |
2359 | goto out_unlock; | |
2360 | ||
827d9780 BG |
2361 | if (need_rls_dev) |
2362 | dev_put(dev); | |
1da177e4 | 2363 | |
40d4e3df | 2364 | return len; |
1da177e4 LT |
2365 | |
2366 | out_free: | |
2367 | kfree_skb(skb); | |
2368 | out_unlock: | |
827d9780 | 2369 | if (dev && need_rls_dev) |
1da177e4 LT |
2370 | dev_put(dev); |
2371 | out: | |
2372 | return err; | |
2373 | } | |
2374 | ||
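
For comparison, the classic (non-ring) transmit path served by packet_snd() as user space drives it. With SOCK_RAW the frame must already carry its link-level header, so the sockaddr_ll mainly names the device and protocol; ifindex is assumed to come from if_nametoindex() or SIOCGIFINDEX.

#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static ssize_t raw_send(int fd, int ifindex, const void *frame, size_t len)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_ifindex  = ifindex;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_halen    = ETH_ALEN;	/* keeps the msg_namelen check above happy */
	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&sll, sizeof(sll));
}
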
69e3c75f JB |
2375 | static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, |
2376 | struct msghdr *msg, size_t len) | |
2377 | { | |
69e3c75f JB |
2378 | struct sock *sk = sock->sk; |
2379 | struct packet_sock *po = pkt_sk(sk); | |
2380 | if (po->tx_ring.pg_vec) | |
2381 | return tpacket_snd(po, msg); | |
2382 | else | |
69e3c75f JB |
2383 | return packet_snd(sock, msg, len); |
2384 | } | |
2385 | ||
1da177e4 LT |
2386 | /* |
2387 | * Close a PACKET socket. This is fairly simple. We immediately go | |
2388 | * to 'closed' state and remove our protocol entry in the device list. | |
2389 | */ | |
2390 | ||
2391 | static int packet_release(struct socket *sock) | |
2392 | { | |
2393 | struct sock *sk = sock->sk; | |
2394 | struct packet_sock *po; | |
d12d01d6 | 2395 | struct net *net; |
f6fb8f10 | 2396 | union tpacket_req_u req_u; |
1da177e4 LT |
2397 | |
2398 | if (!sk) | |
2399 | return 0; | |
2400 | ||
3b1e0a65 | 2401 | net = sock_net(sk); |
1da177e4 LT |
2402 | po = pkt_sk(sk); |
2403 | ||
808f5114 | 2404 | spin_lock_bh(&net->packet.sklist_lock); |
2405 | sk_del_node_init_rcu(sk); | |
920de804 | 2406 | sock_prot_inuse_add(net, sk->sk_prot, -1); |
808f5114 | 2407 | spin_unlock_bh(&net->packet.sklist_lock); |
1da177e4 | 2408 | |
808f5114 | 2409 | spin_lock(&po->bind_lock); |
ce06b03e | 2410 | unregister_prot_hook(sk, false); |
160ff18a BG |
2411 | if (po->prot_hook.dev) { |
2412 | dev_put(po->prot_hook.dev); | |
2413 | po->prot_hook.dev = NULL; | |
2414 | } | |
808f5114 | 2415 | spin_unlock(&po->bind_lock); |
1da177e4 | 2416 | |
1da177e4 | 2417 | packet_flush_mclist(sk); |
1da177e4 | 2418 | |
f6fb8f10 | 2419 | memset(&req_u, 0, sizeof(req_u)); |
69e3c75f JB |
2420 | |
2421 | if (po->rx_ring.pg_vec) | |
f6fb8f10 | 2422 | packet_set_ring(sk, &req_u, 1, 0); |
69e3c75f JB |
2423 | |
2424 | if (po->tx_ring.pg_vec) | |
f6fb8f10 | 2425 | packet_set_ring(sk, &req_u, 1, 1); |
1da177e4 | 2426 | |
dc99f600 DM |
2427 | fanout_release(sk); |
2428 | ||
808f5114 | 2429 | synchronize_net(); |
1da177e4 LT |
2430 | /* |
2431 | * Now the socket is dead. No more input will appear. | |
2432 | */ | |
1da177e4 LT |
2433 | sock_orphan(sk); |
2434 | sock->sk = NULL; | |
2435 | ||
2436 | /* Purge queues */ | |
2437 | ||
2438 | skb_queue_purge(&sk->sk_receive_queue); | |
17ab56a2 | 2439 | sk_refcnt_debug_release(sk); |
1da177e4 LT |
2440 | |
2441 | sock_put(sk); | |
2442 | return 0; | |
2443 | } | |
2444 | ||
2445 | /* | |
2446 | * Attach a packet hook. | |
2447 | */ | |
2448 | ||
0e11c91e | 2449 | static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol) |
1da177e4 LT |
2450 | { |
2451 | struct packet_sock *po = pkt_sk(sk); | |
dc99f600 DM |
2452 | |
2453 | if (po->fanout) | |
2454 | return -EINVAL; | |
1da177e4 LT |
2455 | |
2456 | lock_sock(sk); | |
2457 | ||
2458 | spin_lock(&po->bind_lock); | |
ce06b03e | 2459 | unregister_prot_hook(sk, true); |
1da177e4 LT |
2460 | po->num = protocol; |
2461 | po->prot_hook.type = protocol; | |
160ff18a BG |
2462 | if (po->prot_hook.dev) |
2463 | dev_put(po->prot_hook.dev); | |
1da177e4 LT |
2464 | po->prot_hook.dev = dev; |
2465 | ||
2466 | po->ifindex = dev ? dev->ifindex : 0; | |
2467 | ||
2468 | if (protocol == 0) | |
2469 | goto out_unlock; | |
2470 | ||
be85d4ad | 2471 | if (!dev || (dev->flags & IFF_UP)) { |
ce06b03e | 2472 | register_prot_hook(sk); |
be85d4ad UT |
2473 | } else { |
2474 | sk->sk_err = ENETDOWN; | |
2475 | if (!sock_flag(sk, SOCK_DEAD)) | |
2476 | sk->sk_error_report(sk); | |
1da177e4 LT |
2477 | } |
2478 | ||
2479 | out_unlock: | |
2480 | spin_unlock(&po->bind_lock); | |
2481 | release_sock(sk); | |
2482 | return 0; | |
2483 | } | |
2484 | ||
2485 | /* | |
2486 | * Bind a packet socket to a device | |
2487 | */ | |
2488 | ||
40d4e3df ED |
2489 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
2490 | int addr_len) | |
1da177e4 | 2491 | { |
40d4e3df | 2492 | struct sock *sk = sock->sk; |
1da177e4 LT |
2493 | char name[15]; |
2494 | struct net_device *dev; | |
2495 | int err = -ENODEV; | |
1ce4f28b | 2496 | |
1da177e4 LT |
2497 | /* |
2498 | * Check legality | |
2499 | */ | |
1ce4f28b | 2500 | |
8ae55f04 | 2501 | if (addr_len != sizeof(struct sockaddr)) |
1da177e4 | 2502 | return -EINVAL; |
40d4e3df | 2503 | strlcpy(name, uaddr->sa_data, sizeof(name)); |
1da177e4 | 2504 | |
3b1e0a65 | 2505 | dev = dev_get_by_name(sock_net(sk), name); |
160ff18a | 2506 | if (dev) |
1da177e4 | 2507 | err = packet_do_bind(sk, dev, pkt_sk(sk)->num); |
1da177e4 LT |
2508 | return err; |
2509 | } | |
1da177e4 LT |
2510 | |
2511 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |
2512 | { | |
40d4e3df ED |
2513 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; |
2514 | struct sock *sk = sock->sk; | |
1da177e4 LT |
2515 | struct net_device *dev = NULL; |
2516 | int err; | |
2517 | ||
2518 | ||
2519 | /* | |
2520 | * Check legality | |
2521 | */ | |
1ce4f28b | 2522 | |
1da177e4 LT |
2523 | if (addr_len < sizeof(struct sockaddr_ll)) |
2524 | return -EINVAL; | |
2525 | if (sll->sll_family != AF_PACKET) | |
2526 | return -EINVAL; | |
2527 | ||
2528 | if (sll->sll_ifindex) { | |
2529 | err = -ENODEV; | |
3b1e0a65 | 2530 | dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex); |
1da177e4 LT |
2531 | if (dev == NULL) |
2532 | goto out; | |
2533 | } | |
2534 | err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num); | |
1da177e4 LT |
2535 | |
2536 | out: | |
2537 | return err; | |
2538 | } | |
2539 | ||
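
A sketch of the bind that lands in packet_do_bind() above; the interface name is a placeholder and if_nametoindex() supplies the ifindex the handler expects.

#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static int bind_to_dev(int fd, const char *ifname)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex  = if_nametoindex(ifname);
	if (!sll.sll_ifindex)
		return -1;
	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
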
2540 | static struct proto packet_proto = { | |
2541 | .name = "PACKET", | |
2542 | .owner = THIS_MODULE, | |
2543 | .obj_size = sizeof(struct packet_sock), | |
2544 | }; | |
2545 | ||
2546 | /* | |
1ce4f28b | 2547 | * Create a packet of type SOCK_PACKET. |
1da177e4 LT |
2548 | */ |
2549 | ||
3f378b68 EP |
2550 | static int packet_create(struct net *net, struct socket *sock, int protocol, |
2551 | int kern) | |
1da177e4 LT |
2552 | { |
2553 | struct sock *sk; | |
2554 | struct packet_sock *po; | |
0e11c91e | 2555 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ |
1da177e4 LT |
2556 | int err; |
2557 | ||
2558 | if (!capable(CAP_NET_RAW)) | |
2559 | return -EPERM; | |
be02097c DM |
2560 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && |
2561 | sock->type != SOCK_PACKET) | |
1da177e4 LT |
2562 | return -ESOCKTNOSUPPORT; |
2563 | ||
2564 | sock->state = SS_UNCONNECTED; | |
2565 | ||
2566 | err = -ENOBUFS; | |
6257ff21 | 2567 | sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto); |
1da177e4 LT |
2568 | if (sk == NULL) |
2569 | goto out; | |
2570 | ||
2571 | sock->ops = &packet_ops; | |
1da177e4 LT |
2572 | if (sock->type == SOCK_PACKET) |
2573 | sock->ops = &packet_ops_spkt; | |
be02097c | 2574 | |
1da177e4 LT |
2575 | sock_init_data(sock, sk); |
2576 | ||
2577 | po = pkt_sk(sk); | |
2578 | sk->sk_family = PF_PACKET; | |
0e11c91e | 2579 | po->num = proto; |
1da177e4 LT |
2580 | |
2581 | sk->sk_destruct = packet_sock_destruct; | |
17ab56a2 | 2582 | sk_refcnt_debug_inc(sk); |
1da177e4 LT |
2583 | |
2584 | /* | |
2585 | * Attach a protocol block | |
2586 | */ | |
2587 | ||
2588 | spin_lock_init(&po->bind_lock); | |
905db440 | 2589 | mutex_init(&po->pg_vec_lock); |
1da177e4 | 2590 | po->prot_hook.func = packet_rcv; |
be02097c | 2591 | |
1da177e4 LT |
2592 | if (sock->type == SOCK_PACKET) |
2593 | po->prot_hook.func = packet_rcv_spkt; | |
be02097c | 2594 | |
1da177e4 LT |
2595 | po->prot_hook.af_packet_priv = sk; |
2596 | ||
0e11c91e AV |
2597 | if (proto) { |
2598 | po->prot_hook.type = proto; | |
ce06b03e | 2599 | register_prot_hook(sk); |
1da177e4 LT |
2600 | } |
2601 | ||
808f5114 | 2602 | spin_lock_bh(&net->packet.sklist_lock); |
2603 | sk_add_node_rcu(sk, &net->packet.sklist); | |
3680453c | 2604 | sock_prot_inuse_add(net, &packet_proto, 1); |
808f5114 | 2605 | spin_unlock_bh(&net->packet.sklist_lock); |
2606 | ||
40d4e3df | 2607 | return 0; |
1da177e4 LT |
2608 | out: |
2609 | return err; | |
2610 | } | |
2611 | ||
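
The three socket() flavours packet_create() accepts, sketched from user space; all require CAP_NET_RAW, and the protocol argument travels in network byte order (the "weird, but documented" cast above).

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

static void open_packet_sockets(void)
{
	int raw  = socket(AF_PACKET, SOCK_RAW,    htons(ETH_P_ALL)); /* caller builds link headers */
	int dgrm = socket(AF_PACKET, SOCK_DGRAM,  htons(ETH_P_IP));  /* kernel adds/strips headers */
	int spkt = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL)); /* obsolete compatibility API */

	if (raw < 0 || dgrm < 0 || spkt < 0)
		perror("socket(AF_PACKET, ...)");
}
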
ed85b565 RC |
2612 | static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len) |
2613 | { | |
2614 | struct sock_exterr_skb *serr; | |
2615 | struct sk_buff *skb, *skb2; | |
2616 | int copied, err; | |
2617 | ||
2618 | err = -EAGAIN; | |
2619 | skb = skb_dequeue(&sk->sk_error_queue); | |
2620 | if (skb == NULL) | |
2621 | goto out; | |
2622 | ||
2623 | copied = skb->len; | |
2624 | if (copied > len) { | |
2625 | msg->msg_flags |= MSG_TRUNC; | |
2626 | copied = len; | |
2627 | } | |
2628 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | |
2629 | if (err) | |
2630 | goto out_free_skb; | |
2631 | ||
2632 | sock_recv_timestamp(msg, sk, skb); | |
2633 | ||
2634 | serr = SKB_EXT_ERR(skb); | |
2635 | put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP, | |
2636 | sizeof(serr->ee), &serr->ee); | |
2637 | ||
2638 | msg->msg_flags |= MSG_ERRQUEUE; | |
2639 | err = copied; | |
2640 | ||
2641 | /* Reset and regenerate socket error */ | |
2642 | spin_lock_bh(&sk->sk_error_queue.lock); | |
2643 | sk->sk_err = 0; | |
2644 | if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { | |
2645 | sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; | |
2646 | spin_unlock_bh(&sk->sk_error_queue.lock); | |
2647 | sk->sk_error_report(sk); | |
2648 | } else | |
2649 | spin_unlock_bh(&sk->sk_error_queue.lock); | |
2650 | ||
2651 | out_free_skb: | |
2652 | kfree_skb(skb); | |
2653 | out: | |
2654 | return err; | |
2655 | } | |
2656 | ||
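
A sketch of the reader that packet_recv_error() serves: recvmsg() with MSG_ERRQUEUE, then a scan for the PACKET_TX_TIMESTAMP cmsg. It assumes TX timestamping was previously enabled (for example via SO_TIMESTAMPING, not shown).

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/errqueue.h>

static void drain_errqueue(int fd)
{
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_PACKET &&
		    cm->cmsg_type == PACKET_TX_TIMESTAMP) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);
			printf("errqueue: ee_errno=%u ee_origin=%u\n",
			       ee->ee_errno, ee->ee_origin);
		}
	}
}
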
1da177e4 LT |
2657 | /* |
2658 | * Pull a packet from our receive queue and hand it to the user. | |
2659 | * If necessary we block. | |
2660 | */ | |
2661 | ||
2662 | static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |
2663 | struct msghdr *msg, size_t len, int flags) | |
2664 | { | |
2665 | struct sock *sk = sock->sk; | |
2666 | struct sk_buff *skb; | |
2667 | int copied, err; | |
0fb375fb | 2668 | struct sockaddr_ll *sll; |
bfd5f4a3 | 2669 | int vnet_hdr_len = 0; |
1da177e4 LT |
2670 | |
2671 | err = -EINVAL; | |
ed85b565 | 2672 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) |
1da177e4 LT |
2673 | goto out; |
2674 | ||
2675 | #if 0 | |
2676 | /* What error should we return now? EUNATTACH? */ | |
2677 | if (pkt_sk(sk)->ifindex < 0) | |
2678 | return -ENODEV; | |
2679 | #endif | |
2680 | ||
ed85b565 RC |
2681 | if (flags & MSG_ERRQUEUE) { |
2682 | err = packet_recv_error(sk, msg, len); | |
2683 | goto out; | |
2684 | } | |
2685 | ||
1da177e4 LT |
2686 | /* |
2687 | * Call the generic datagram receiver. This handles all sorts | |
2688 | * of horrible races and re-entrancy so we can forget about it | |
2689 | * in the protocol layers. | |
2690 | * | |
2691 | * Now it will return ENETDOWN if the device has just gone down, |
2692 | * but then it will block. | |
2693 | */ | |
2694 | ||
40d4e3df | 2695 | skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); |
1da177e4 LT |
2696 | |
2697 | /* | |
1ce4f28b | 2698 | * An error occurred so return it. Because skb_recv_datagram() |
1da177e4 LT |
2699 | * handles the blocking, we don't need to see or worry about |
2700 | * blocking retries. |
2701 | */ | |
2702 | ||
8ae55f04 | 2703 | if (skb == NULL) |
1da177e4 LT |
2704 | goto out; |
2705 | ||
bfd5f4a3 SS |
2706 | if (pkt_sk(sk)->has_vnet_hdr) { |
2707 | struct virtio_net_hdr vnet_hdr = { 0 }; | |
2708 | ||
2709 | err = -EINVAL; | |
2710 | vnet_hdr_len = sizeof(vnet_hdr); | |
1f18b717 | 2711 | if (len < vnet_hdr_len) |
bfd5f4a3 SS |
2712 | goto out_free; |
2713 | ||
1f18b717 MK |
2714 | len -= vnet_hdr_len; |
2715 | ||
bfd5f4a3 SS |
2716 | if (skb_is_gso(skb)) { |
2717 | struct skb_shared_info *sinfo = skb_shinfo(skb); | |
2718 | ||
2719 | /* This is a hint as to how much should be linear. */ | |
2720 | vnet_hdr.hdr_len = skb_headlen(skb); | |
2721 | vnet_hdr.gso_size = sinfo->gso_size; | |
2722 | if (sinfo->gso_type & SKB_GSO_TCPV4) | |
2723 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | |
2724 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | |
2725 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | |
2726 | else if (sinfo->gso_type & SKB_GSO_UDP) | |
2727 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; | |
2728 | else if (sinfo->gso_type & SKB_GSO_FCOE) | |
2729 | goto out_free; | |
2730 | else | |
2731 | BUG(); | |
2732 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) | |
2733 | vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; | |
2734 | } else | |
2735 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; | |
2736 | ||
2737 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
2738 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; | |
55508d60 | 2739 | vnet_hdr.csum_start = skb_checksum_start_offset(skb); |
bfd5f4a3 | 2740 | vnet_hdr.csum_offset = skb->csum_offset; |
10a8d94a JW |
2741 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
2742 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; | |
bfd5f4a3 SS |
2743 | } /* else everything is zero */ |
2744 | ||
2745 | err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr, | |
2746 | vnet_hdr_len); | |
2747 | if (err < 0) | |
2748 | goto out_free; | |
2749 | } | |
2750 | ||
0fb375fb EB |
2751 | /* |
2752 | * If the address length field is there to be filled in, we fill | |
2753 | * it in now. | |
2754 | */ | |
2755 | ||
ffbc6111 | 2756 | sll = &PACKET_SKB_CB(skb)->sa.ll; |
0fb375fb EB |
2757 | if (sock->type == SOCK_PACKET) |
2758 | msg->msg_namelen = sizeof(struct sockaddr_pkt); | |
2759 | else | |
2760 | msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); | |
2761 | ||
1da177e4 LT |
2762 | /* |
2763 | * You lose any data beyond the buffer you gave. If it worries a | |
2764 | * user program, it can ask the device for its MTU anyway. |
2765 | */ | |
2766 | ||
2767 | copied = skb->len; | |
40d4e3df ED |
2768 | if (copied > len) { |
2769 | copied = len; | |
2770 | msg->msg_flags |= MSG_TRUNC; | |
1da177e4 LT |
2771 | } |
2772 | ||
2773 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | |
2774 | if (err) | |
2775 | goto out_free; | |
2776 | ||
3b885787 | 2777 | sock_recv_ts_and_drops(msg, sk, skb); |
1da177e4 LT |
2778 | |
2779 | if (msg->msg_name) | |
ffbc6111 HX |
2780 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, |
2781 | msg->msg_namelen); | |
1da177e4 | 2782 | |
8dc41944 | 2783 | if (pkt_sk(sk)->auxdata) { |
ffbc6111 HX |
2784 | struct tpacket_auxdata aux; |
2785 | ||
2786 | aux.tp_status = TP_STATUS_USER; | |
2787 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
2788 | aux.tp_status |= TP_STATUS_CSUMNOTREADY; | |
2789 | aux.tp_len = PACKET_SKB_CB(skb)->origlen; | |
2790 | aux.tp_snaplen = skb->len; | |
2791 | aux.tp_mac = 0; | |
bbe735e4 | 2792 | aux.tp_net = skb_network_offset(skb); |
a3bcc23e BG |
2793 | if (vlan_tx_tag_present(skb)) { |
2794 | aux.tp_vlan_tci = vlan_tx_tag_get(skb); | |
2795 | aux.tp_status |= TP_STATUS_VLAN_VALID; | |
2796 | } else { | |
2797 | aux.tp_vlan_tci = 0; | |
2798 | } | |
13fcb7bd | 2799 | aux.tp_padding = 0; |
ffbc6111 | 2800 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); |
8dc41944 HX |
2801 | } |
2802 | ||
1da177e4 LT |
2803 | /* |
2804 | * Free or return the buffer as appropriate. Again this | |
2805 | * hides all the races and re-entrancy issues from us. | |
2806 | */ | |
bfd5f4a3 | 2807 | err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied); |
1da177e4 LT |
2808 | |
2809 | out_free: | |
2810 | skb_free_datagram(sk, skb); | |
2811 | out: | |
2812 | return err; | |
2813 | } | |
2814 | ||
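
Consuming the PACKET_AUXDATA cmsg that packet_recvmsg() emits, sketched under the assumption that the socket already enabled it with setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, ...):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void recv_with_aux(int fd)
{
	char data[2048];
	union {
		struct cmsghdr cm;
		char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	} ctrl;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = &ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, 0) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		struct tpacket_auxdata *aux;

		if (cm->cmsg_level != SOL_PACKET ||
		    cm->cmsg_type != PACKET_AUXDATA)
			continue;
		aux = (struct tpacket_auxdata *)CMSG_DATA(cm);
		printf("orig len %u, snaplen %u, vlan tci %u\n",
		       aux->tp_len, aux->tp_snaplen, aux->tp_vlan_tci);
	}
}
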
1da177e4 LT |
2815 | static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, |
2816 | int *uaddr_len, int peer) | |
2817 | { | |
2818 | struct net_device *dev; | |
2819 | struct sock *sk = sock->sk; | |
2820 | ||
2821 | if (peer) | |
2822 | return -EOPNOTSUPP; | |
2823 | ||
2824 | uaddr->sa_family = AF_PACKET; | |
654d1f8a ED |
2825 | rcu_read_lock(); |
2826 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); | |
2827 | if (dev) | |
67286640 | 2828 | strncpy(uaddr->sa_data, dev->name, 14); |
654d1f8a | 2829 | else |
1da177e4 | 2830 | memset(uaddr->sa_data, 0, 14); |
654d1f8a | 2831 | rcu_read_unlock(); |
1da177e4 LT |
2832 | *uaddr_len = sizeof(*uaddr); |
2833 | ||
2834 | return 0; | |
2835 | } | |
1da177e4 LT |
2836 | |
2837 | static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |
2838 | int *uaddr_len, int peer) | |
2839 | { | |
2840 | struct net_device *dev; | |
2841 | struct sock *sk = sock->sk; | |
2842 | struct packet_sock *po = pkt_sk(sk); | |
13cfa97b | 2843 | DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); |
1da177e4 LT |
2844 | |
2845 | if (peer) | |
2846 | return -EOPNOTSUPP; | |
2847 | ||
2848 | sll->sll_family = AF_PACKET; | |
2849 | sll->sll_ifindex = po->ifindex; | |
2850 | sll->sll_protocol = po->num; | |
67286640 | 2851 | sll->sll_pkttype = 0; |
654d1f8a ED |
2852 | rcu_read_lock(); |
2853 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); | |
1da177e4 LT |
2854 | if (dev) { |
2855 | sll->sll_hatype = dev->type; | |
2856 | sll->sll_halen = dev->addr_len; | |
2857 | memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); | |
1da177e4 LT |
2858 | } else { |
2859 | sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ | |
2860 | sll->sll_halen = 0; | |
2861 | } | |
654d1f8a | 2862 | rcu_read_unlock(); |
0fb375fb | 2863 | *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; |
1da177e4 LT |
2864 | |
2865 | return 0; | |
2866 | } | |
2867 | ||
2aeb0b88 WC |
2868 | static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, |
2869 | int what) | |
1da177e4 LT |
2870 | { |
2871 | switch (i->type) { | |
2872 | case PACKET_MR_MULTICAST: | |
1162563f JP |
2873 | if (i->alen != dev->addr_len) |
2874 | return -EINVAL; | |
1da177e4 | 2875 | if (what > 0) |
22bedad3 | 2876 | return dev_mc_add(dev, i->addr); |
1da177e4 | 2877 | else |
22bedad3 | 2878 | return dev_mc_del(dev, i->addr); |
1da177e4 LT |
2879 | break; |
2880 | case PACKET_MR_PROMISC: | |
2aeb0b88 | 2881 | return dev_set_promiscuity(dev, what); |
1da177e4 LT |
2882 | break; |
2883 | case PACKET_MR_ALLMULTI: | |
2aeb0b88 | 2884 | return dev_set_allmulti(dev, what); |
1da177e4 | 2885 | break; |
d95ed927 | 2886 | case PACKET_MR_UNICAST: |
1162563f JP |
2887 | if (i->alen != dev->addr_len) |
2888 | return -EINVAL; | |
d95ed927 | 2889 | if (what > 0) |
a748ee24 | 2890 | return dev_uc_add(dev, i->addr); |
d95ed927 | 2891 | else |
a748ee24 | 2892 | return dev_uc_del(dev, i->addr); |
d95ed927 | 2893 | break; |
40d4e3df ED |
2894 | default: |
2895 | break; | |
1da177e4 | 2896 | } |
2aeb0b88 | 2897 | return 0; |
1da177e4 LT |
2898 | } |
2899 | ||
2900 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | |
2901 | { | |
40d4e3df | 2902 | for ( ; i; i = i->next) { |
1da177e4 LT |
2903 | if (i->ifindex == dev->ifindex) |
2904 | packet_dev_mc(dev, i, what); | |
2905 | } | |
2906 | } | |
2907 | ||
0fb375fb | 2908 | static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) |
1da177e4 LT |
2909 | { |
2910 | struct packet_sock *po = pkt_sk(sk); | |
2911 | struct packet_mclist *ml, *i; | |
2912 | struct net_device *dev; | |
2913 | int err; | |
2914 | ||
2915 | rtnl_lock(); | |
2916 | ||
2917 | err = -ENODEV; | |
3b1e0a65 | 2918 | dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); |
1da177e4 LT |
2919 | if (!dev) |
2920 | goto done; | |
2921 | ||
2922 | err = -EINVAL; | |
1162563f | 2923 | if (mreq->mr_alen > dev->addr_len) |
1da177e4 LT |
2924 | goto done; |
2925 | ||
2926 | err = -ENOBUFS; | |
8b3a7005 | 2927 | i = kmalloc(sizeof(*i), GFP_KERNEL); |
1da177e4 LT |
2928 | if (i == NULL) |
2929 | goto done; | |
2930 | ||
2931 | err = 0; | |
2932 | for (ml = po->mclist; ml; ml = ml->next) { | |
2933 | if (ml->ifindex == mreq->mr_ifindex && | |
2934 | ml->type == mreq->mr_type && | |
2935 | ml->alen == mreq->mr_alen && | |
2936 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | |
2937 | ml->count++; | |
2938 | /* Free the new element ... */ | |
2939 | kfree(i); | |
2940 | goto done; | |
2941 | } | |
2942 | } | |
2943 | ||
2944 | i->type = mreq->mr_type; | |
2945 | i->ifindex = mreq->mr_ifindex; | |
2946 | i->alen = mreq->mr_alen; | |
2947 | memcpy(i->addr, mreq->mr_address, i->alen); | |
2948 | i->count = 1; | |
2949 | i->next = po->mclist; | |
2950 | po->mclist = i; | |
2aeb0b88 WC |
2951 | err = packet_dev_mc(dev, i, 1); |
2952 | if (err) { | |
2953 | po->mclist = i->next; | |
2954 | kfree(i); | |
2955 | } | |
1da177e4 LT |
2956 | |
2957 | done: | |
2958 | rtnl_unlock(); | |
2959 | return err; | |
2960 | } | |
2961 | ||
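
The user-space request that ends up in packet_mc_add(), sketched for the PACKET_MR_PROMISC case (no address needed; for PACKET_MR_MULTICAST, mr_address/mr_alen would carry the group MAC):

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int enable_promisc(int fd, int ifindex)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;
	mreq.mr_type = PACKET_MR_PROMISC;
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
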
0fb375fb | 2962 | static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) |
1da177e4 LT |
2963 | { |
2964 | struct packet_mclist *ml, **mlp; | |
2965 | ||
2966 | rtnl_lock(); | |
2967 | ||
2968 | for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { | |
2969 | if (ml->ifindex == mreq->mr_ifindex && | |
2970 | ml->type == mreq->mr_type && | |
2971 | ml->alen == mreq->mr_alen && | |
2972 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | |
2973 | if (--ml->count == 0) { | |
2974 | struct net_device *dev; | |
2975 | *mlp = ml->next; | |
ad959e76 ED |
2976 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
2977 | if (dev) | |
1da177e4 | 2978 | packet_dev_mc(dev, ml, -1); |
1da177e4 LT |
2979 | kfree(ml); |
2980 | } | |
2981 | rtnl_unlock(); | |
2982 | return 0; | |
2983 | } | |
2984 | } | |
2985 | rtnl_unlock(); | |
2986 | return -EADDRNOTAVAIL; | |
2987 | } | |
2988 | ||
2989 | static void packet_flush_mclist(struct sock *sk) | |
2990 | { | |
2991 | struct packet_sock *po = pkt_sk(sk); | |
2992 | struct packet_mclist *ml; | |
2993 | ||
2994 | if (!po->mclist) | |
2995 | return; | |
2996 | ||
2997 | rtnl_lock(); | |
2998 | while ((ml = po->mclist) != NULL) { | |
2999 | struct net_device *dev; | |
3000 | ||
3001 | po->mclist = ml->next; | |
ad959e76 ED |
3002 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
3003 | if (dev != NULL) | |
1da177e4 | 3004 | packet_dev_mc(dev, ml, -1); |
1da177e4 LT |
3005 | kfree(ml); |
3006 | } | |
3007 | rtnl_unlock(); | |
3008 | } | |
1da177e4 LT |
3009 | |
3010 | static int | |
b7058842 | 3011 | packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) |
1da177e4 LT |
3012 | { |
3013 | struct sock *sk = sock->sk; | |
8dc41944 | 3014 | struct packet_sock *po = pkt_sk(sk); |
1da177e4 LT |
3015 | int ret; |
3016 | ||
3017 | if (level != SOL_PACKET) | |
3018 | return -ENOPROTOOPT; | |
3019 | ||
69e3c75f | 3020 | switch (optname) { |
1ce4f28b | 3021 | case PACKET_ADD_MEMBERSHIP: |
1da177e4 LT |
3022 | case PACKET_DROP_MEMBERSHIP: |
3023 | { | |
0fb375fb EB |
3024 | struct packet_mreq_max mreq; |
3025 | int len = optlen; | |
3026 | memset(&mreq, 0, sizeof(mreq)); | |
3027 | if (len < sizeof(struct packet_mreq)) | |
1da177e4 | 3028 | return -EINVAL; |
0fb375fb EB |
3029 | if (len > sizeof(mreq)) |
3030 | len = sizeof(mreq); | |
40d4e3df | 3031 | if (copy_from_user(&mreq, optval, len)) |
1da177e4 | 3032 | return -EFAULT; |
0fb375fb EB |
3033 | if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) |
3034 | return -EINVAL; | |
1da177e4 LT |
3035 | if (optname == PACKET_ADD_MEMBERSHIP) |
3036 | ret = packet_mc_add(sk, &mreq); | |
3037 | else | |
3038 | ret = packet_mc_drop(sk, &mreq); | |
3039 | return ret; | |
3040 | } | |
a2efcfa0 | 3041 | |
1da177e4 | 3042 | case PACKET_RX_RING: |
69e3c75f | 3043 | case PACKET_TX_RING: |
1da177e4 | 3044 | { |
f6fb8f10 | 3045 | union tpacket_req_u req_u; |
3046 | int len; | |
1da177e4 | 3047 | |
f6fb8f10 | 3048 | switch (po->tp_version) { |
3049 | case TPACKET_V1: | |
3050 | case TPACKET_V2: | |
3051 | len = sizeof(req_u.req); | |
3052 | break; | |
3053 | case TPACKET_V3: | |
3054 | default: | |
3055 | len = sizeof(req_u.req3); | |
3056 | break; | |
3057 | } | |
3058 | if (optlen < len) | |
1da177e4 | 3059 | return -EINVAL; |
bfd5f4a3 SS |
3060 | if (pkt_sk(sk)->has_vnet_hdr) |
3061 | return -EINVAL; | |
f6fb8f10 | 3062 | if (copy_from_user(&req_u.req, optval, len)) |
1da177e4 | 3063 | return -EFAULT; |
f6fb8f10 | 3064 | return packet_set_ring(sk, &req_u, 0, |
3065 | optname == PACKET_TX_RING); | |
1da177e4 LT |
3066 | } |
3067 | case PACKET_COPY_THRESH: | |
3068 | { | |
3069 | int val; | |
3070 | ||
40d4e3df | 3071 | if (optlen != sizeof(val)) |
1da177e4 | 3072 | return -EINVAL; |
40d4e3df | 3073 | if (copy_from_user(&val, optval, sizeof(val))) |
1da177e4 LT |
3074 | return -EFAULT; |
3075 | ||
3076 | pkt_sk(sk)->copy_thresh = val; | |
3077 | return 0; | |
3078 | } | |
bbd6ef87 PM |
3079 | case PACKET_VERSION: |
3080 | { | |
3081 | int val; | |
3082 | ||
3083 | if (optlen != sizeof(val)) | |
3084 | return -EINVAL; | |
69e3c75f | 3085 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
bbd6ef87 PM |
3086 | return -EBUSY; |
3087 | if (copy_from_user(&val, optval, sizeof(val))) | |
3088 | return -EFAULT; | |
3089 | switch (val) { | |
3090 | case TPACKET_V1: | |
3091 | case TPACKET_V2: | |
f6fb8f10 | 3092 | case TPACKET_V3: |
bbd6ef87 PM |
3093 | po->tp_version = val; |
3094 | return 0; | |
3095 | default: | |
3096 | return -EINVAL; | |
3097 | } | |
3098 | } | |
8913336a PM |
3099 | case PACKET_RESERVE: |
3100 | { | |
3101 | unsigned int val; | |
3102 | ||
3103 | if (optlen != sizeof(val)) | |
3104 | return -EINVAL; | |
69e3c75f | 3105 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
8913336a PM |
3106 | return -EBUSY; |
3107 | if (copy_from_user(&val, optval, sizeof(val))) | |
3108 | return -EFAULT; | |
3109 | po->tp_reserve = val; | |
3110 | return 0; | |
3111 | } | |
69e3c75f JB |
3112 | case PACKET_LOSS: |
3113 | { | |
3114 | unsigned int val; | |
3115 | ||
3116 | if (optlen != sizeof(val)) | |
3117 | return -EINVAL; | |
3118 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3119 | return -EBUSY; | |
3120 | if (copy_from_user(&val, optval, sizeof(val))) | |
3121 | return -EFAULT; | |
3122 | po->tp_loss = !!val; | |
3123 | return 0; | |
3124 | } | |
8dc41944 HX |
3125 | case PACKET_AUXDATA: |
3126 | { | |
3127 | int val; | |
3128 | ||
3129 | if (optlen < sizeof(val)) | |
3130 | return -EINVAL; | |
3131 | if (copy_from_user(&val, optval, sizeof(val))) | |
3132 | return -EFAULT; | |
3133 | ||
3134 | po->auxdata = !!val; | |
3135 | return 0; | |
3136 | } | |
80feaacb PWJ |
3137 | case PACKET_ORIGDEV: |
3138 | { | |
3139 | int val; | |
3140 | ||
3141 | if (optlen < sizeof(val)) | |
3142 | return -EINVAL; | |
3143 | if (copy_from_user(&val, optval, sizeof(val))) | |
3144 | return -EFAULT; | |
3145 | ||
3146 | po->origdev = !!val; | |
3147 | return 0; | |
3148 | } | |
bfd5f4a3 SS |
3149 | case PACKET_VNET_HDR: |
3150 | { | |
3151 | int val; | |
3152 | ||
3153 | if (sock->type != SOCK_RAW) | |
3154 | return -EINVAL; | |
3155 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | |
3156 | return -EBUSY; | |
3157 | if (optlen < sizeof(val)) | |
3158 | return -EINVAL; | |
3159 | if (copy_from_user(&val, optval, sizeof(val))) | |
3160 | return -EFAULT; | |
3161 | ||
3162 | po->has_vnet_hdr = !!val; | |
3163 | return 0; | |
3164 | } | |
614f60fa SM |
3165 | case PACKET_TIMESTAMP: |
3166 | { | |
3167 | int val; | |
3168 | ||
3169 | if (optlen != sizeof(val)) | |
3170 | return -EINVAL; | |
3171 | if (copy_from_user(&val, optval, sizeof(val))) | |
3172 | return -EFAULT; | |
3173 | ||
3174 | po->tp_tstamp = val; | |
3175 | return 0; | |
3176 | } | |
dc99f600 DM |
3177 | case PACKET_FANOUT: |
3178 | { | |
3179 | int val; | |
3180 | ||
3181 | if (optlen != sizeof(val)) | |
3182 | return -EINVAL; | |
3183 | if (copy_from_user(&val, optval, sizeof(val))) | |
3184 | return -EFAULT; | |
3185 | ||
3186 | return fanout_add(sk, val & 0xffff, val >> 16); | |
3187 | } | |
1da177e4 LT |
3188 | default: |
3189 | return -ENOPROTOOPT; | |
3190 | } | |
3191 | } | |
3192 | ||
3193 | static int packet_getsockopt(struct socket *sock, int level, int optname, | |
3194 | char __user *optval, int __user *optlen) | |
3195 | { | |
3196 | int len; | |
8dc41944 | 3197 | int val; |
1da177e4 LT |
3198 | struct sock *sk = sock->sk; |
3199 | struct packet_sock *po = pkt_sk(sk); | |
8dc41944 HX |
3200 | void *data; |
3201 | struct tpacket_stats st; | |
f6fb8f10 | 3202 | union tpacket_stats_u st_u; |
1da177e4 LT |
3203 | |
3204 | if (level != SOL_PACKET) | |
3205 | return -ENOPROTOOPT; | |
3206 | ||
8ae55f04 KK |
3207 | if (get_user(len, optlen)) |
3208 | return -EFAULT; | |
1da177e4 LT |
3209 | |
3210 | if (len < 0) | |
3211 | return -EINVAL; | |
1ce4f28b | 3212 | |
69e3c75f | 3213 | switch (optname) { |
1da177e4 | 3214 | case PACKET_STATISTICS: |
f6fb8f10 | 3215 | if (po->tp_version == TPACKET_V3) { |
3216 | len = sizeof(struct tpacket_stats_v3); | |
3217 | } else { | |
3218 | if (len > sizeof(struct tpacket_stats)) | |
3219 | len = sizeof(struct tpacket_stats); | |
3220 | } | |
1da177e4 | 3221 | spin_lock_bh(&sk->sk_receive_queue.lock); |
f6fb8f10 | 3222 | if (po->tp_version == TPACKET_V3) { |
3223 | memcpy(&st_u.stats3, &po->stats, | |
3224 | sizeof(struct tpacket_stats)); | |
3225 | st_u.stats3.tp_freeze_q_cnt = | |
3226 | po->stats_u.stats3.tp_freeze_q_cnt; | |
3227 | st_u.stats3.tp_packets += po->stats.tp_drops; | |
3228 | data = &st_u.stats3; | |
3229 | } else { | |
3230 | st = po->stats; | |
3231 | st.tp_packets += st.tp_drops; | |
3232 | data = &st; | |
3233 | } | |
1da177e4 LT |
3234 | memset(&po->stats, 0, sizeof(st)); |
3235 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
8dc41944 HX |
3236 | break; |
3237 | case PACKET_AUXDATA: | |
3238 | if (len > sizeof(int)) | |
3239 | len = sizeof(int); | |
3240 | val = po->auxdata; | |
3241 | ||
80feaacb PWJ |
3242 | data = &val; |
3243 | break; | |
3244 | case PACKET_ORIGDEV: | |
3245 | if (len > sizeof(int)) | |
3246 | len = sizeof(int); | |
3247 | val = po->origdev; | |
3248 | ||
bfd5f4a3 SS |
3249 | data = &val; |
3250 | break; | |
3251 | case PACKET_VNET_HDR: | |
3252 | if (len > sizeof(int)) | |
3253 | len = sizeof(int); | |
3254 | val = po->has_vnet_hdr; | |
3255 | ||
8dc41944 | 3256 | data = &val; |
1da177e4 | 3257 | break; |
bbd6ef87 PM |
3258 | case PACKET_VERSION: |
3259 | if (len > sizeof(int)) | |
3260 | len = sizeof(int); | |
3261 | val = po->tp_version; | |
3262 | data = &val; | |
3263 | break; | |
3264 | case PACKET_HDRLEN: | |
3265 | if (len > sizeof(int)) | |
3266 | len = sizeof(int); | |
3267 | if (copy_from_user(&val, optval, len)) | |
3268 | return -EFAULT; | |
3269 | switch (val) { | |
3270 | case TPACKET_V1: | |
3271 | val = sizeof(struct tpacket_hdr); | |
3272 | break; | |
3273 | case TPACKET_V2: | |
3274 | val = sizeof(struct tpacket2_hdr); | |
3275 | break; | |
f6fb8f10 | 3276 | case TPACKET_V3: |
3277 | val = sizeof(struct tpacket3_hdr); | |
3278 | break; | |
bbd6ef87 PM |
3279 | default: |
3280 | return -EINVAL; | |
3281 | } | |
3282 | data = &val; | |
3283 | break; | |
8913336a PM |
3284 | case PACKET_RESERVE: |
3285 | if (len > sizeof(unsigned int)) | |
3286 | len = sizeof(unsigned int); | |
3287 | val = po->tp_reserve; | |
3288 | data = &val; | |
3289 | break; | |
69e3c75f JB |
3290 | case PACKET_LOSS: |
3291 | if (len > sizeof(unsigned int)) | |
3292 | len = sizeof(unsigned int); | |
3293 | val = po->tp_loss; | |
3294 | data = &val; | |
3295 | break; | |
614f60fa SM |
3296 | case PACKET_TIMESTAMP: |
3297 | if (len > sizeof(int)) | |
3298 | len = sizeof(int); | |
3299 | val = po->tp_tstamp; | |
3300 | data = &val; | |
3301 | break; | |
dc99f600 DM |
3302 | case PACKET_FANOUT: |
3303 | if (len > sizeof(int)) | |
3304 | len = sizeof(int); | |
3305 | val = (po->fanout ? | |
3306 | ((u32)po->fanout->id | | |
3307 | ((u32)po->fanout->type << 16)) : | |
3308 | 0); | |
3309 | data = &val; | |
3310 | break; | |
1da177e4 LT |
3311 | default: |
3312 | return -ENOPROTOOPT; | |
3313 | } | |
3314 | ||
8ae55f04 KK |
3315 | if (put_user(len, optlen)) |
3316 | return -EFAULT; | |
8dc41944 HX |
3317 | if (copy_to_user(optval, data, len)) |
3318 | return -EFAULT; | |
8ae55f04 | 3319 | return 0; |
1da177e4 LT |
3320 | } |
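PACKET_HDRLEN is the one option above that also reads from optval: the caller passes a TPACKET version in and receives the matching header length back. A minimal sketch:

#include <sys/socket.h>
#include <linux/if_packet.h>

static int tpacket_hdrlen(int fd, int version)
{
	int val = version;              /* in: TPACKET_V1/V2/V3 */
	socklen_t len = sizeof(val);

	if (getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &val, &len) < 0)
		return -1;
	return val;                     /* out: sizeof(struct tpacketN_hdr) */
}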
3321 | ||
3322 | ||
3323 | static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) | |
3324 | { | |
3325 | struct sock *sk; | |
3326 | struct hlist_node *node; | |
ad930650 | 3327 | struct net_device *dev = data; |
c346dca1 | 3328 | struct net *net = dev_net(dev); |
1da177e4 | 3329 | |
808f5114 | 3330 | rcu_read_lock(); |
3331 | sk_for_each_rcu(sk, node, &net->packet.sklist) { | |
1da177e4 LT |
3332 | struct packet_sock *po = pkt_sk(sk); |
3333 | ||
3334 | switch (msg) { | |
3335 | case NETDEV_UNREGISTER: | |
1da177e4 LT |
3336 | if (po->mclist) |
3337 | packet_dev_mclist(dev, po->mclist, -1); | |
a2efcfa0 DM |
3338 | /* fallthrough */ |
3339 | ||
1da177e4 LT |
3340 | case NETDEV_DOWN: |
3341 | if (dev->ifindex == po->ifindex) { | |
3342 | spin_lock(&po->bind_lock); | |
3343 | if (po->running) { | |
ce06b03e | 3344 | __unregister_prot_hook(sk, false); |
1da177e4 LT |
3345 | sk->sk_err = ENETDOWN; |
3346 | if (!sock_flag(sk, SOCK_DEAD)) | |
3347 | sk->sk_error_report(sk); | |
3348 | } | |
3349 | if (msg == NETDEV_UNREGISTER) { | |
3350 | po->ifindex = -1; | |
160ff18a BG |
3351 | if (po->prot_hook.dev) |
3352 | dev_put(po->prot_hook.dev); | |
1da177e4 LT |
3353 | po->prot_hook.dev = NULL; |
3354 | } | |
3355 | spin_unlock(&po->bind_lock); | |
3356 | } | |
3357 | break; | |
3358 | case NETDEV_UP: | |
808f5114 | 3359 | if (dev->ifindex == po->ifindex) { |
3360 | spin_lock(&po->bind_lock); | |
ce06b03e DM |
3361 | if (po->num) |
3362 | register_prot_hook(sk); | |
808f5114 | 3363 | spin_unlock(&po->bind_lock); |
1da177e4 | 3364 | } |
1da177e4 LT |
3365 | break; |
3366 | } | |
3367 | } | |
808f5114 | 3368 | rcu_read_unlock(); |
1da177e4 LT |
3369 | return NOTIFY_DONE; |
3370 | } | |
3371 | ||
3372 | ||
3373 | static int packet_ioctl(struct socket *sock, unsigned int cmd, | |
3374 | unsigned long arg) | |
3375 | { | |
3376 | struct sock *sk = sock->sk; | |
3377 | ||
69e3c75f | 3378 | switch (cmd) { |
40d4e3df ED |
3379 | case SIOCOUTQ: |
3380 | { | |
3381 | int amount = sk_wmem_alloc_get(sk); | |
31e6d363 | 3382 | |
40d4e3df ED |
3383 | return put_user(amount, (int __user *)arg); |
3384 | } | |
3385 | case SIOCINQ: | |
3386 | { | |
3387 | struct sk_buff *skb; | |
3388 | int amount = 0; | |
3389 | ||
3390 | spin_lock_bh(&sk->sk_receive_queue.lock); | |
3391 | skb = skb_peek(&sk->sk_receive_queue); | |
3392 | if (skb) | |
3393 | amount = skb->len; | |
3394 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
3395 | return put_user(amount, (int __user *)arg); | |
3396 | } | |
3397 | case SIOCGSTAMP: | |
3398 | return sock_get_timestamp(sk, (struct timeval __user *)arg); | |
3399 | case SIOCGSTAMPNS: | |
3400 | return sock_get_timestampns(sk, (struct timespec __user *)arg); | |
1ce4f28b | 3401 | |
1da177e4 | 3402 | #ifdef CONFIG_INET |
40d4e3df ED |
3403 | case SIOCADDRT: |
3404 | case SIOCDELRT: | |
3405 | case SIOCDARP: | |
3406 | case SIOCGARP: | |
3407 | case SIOCSARP: | |
3408 | case SIOCGIFADDR: | |
3409 | case SIOCSIFADDR: | |
3410 | case SIOCGIFBRDADDR: | |
3411 | case SIOCSIFBRDADDR: | |
3412 | case SIOCGIFNETMASK: | |
3413 | case SIOCSIFNETMASK: | |
3414 | case SIOCGIFDSTADDR: | |
3415 | case SIOCSIFDSTADDR: | |
3416 | case SIOCSIFFLAGS: | |
40d4e3df | 3417 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
1da177e4 LT |
3418 | #endif |
3419 | ||
40d4e3df ED |
3420 | default: |
3421 | return -ENOIOCTLCMD; | |
1da177e4 LT |
3422 | } |
3423 | return 0; | |
3424 | } | |
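Note that SIOCINQ on a packet socket reports the length of the frame at the head of the receive queue (skb_peek above), not the total queued bytes. A hedged sketch:

#include <sys/ioctl.h>
#include <linux/sockios.h>

static int next_frame_len(int fd)
{
	int amount = 0;

	/* 0 if the receive queue is empty, else skb->len of the head frame */
	if (ioctl(fd, SIOCINQ, &amount) < 0)
		return -1;
	return amount;
}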
3425 | ||
40d4e3df | 3426 | static unsigned int packet_poll(struct file *file, struct socket *sock, |
1da177e4 LT |
3427 | poll_table *wait) |
3428 | { | |
3429 | struct sock *sk = sock->sk; | |
3430 | struct packet_sock *po = pkt_sk(sk); | |
3431 | unsigned int mask = datagram_poll(file, sock, wait); | |
3432 | ||
3433 | spin_lock_bh(&sk->sk_receive_queue.lock); | |
69e3c75f | 3434 | if (po->rx_ring.pg_vec) { |
f6fb8f10 | 3435 | if (!packet_previous_rx_frame(po, &po->rx_ring, |
3436 | TP_STATUS_KERNEL)) | |
1da177e4 LT |
3437 | mask |= POLLIN | POLLRDNORM; |
3438 | } | |
3439 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
69e3c75f JB |
3440 | spin_lock_bh(&sk->sk_write_queue.lock); |
3441 | if (po->tx_ring.pg_vec) { | |
3442 | if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) | |
3443 | mask |= POLLOUT | POLLWRNORM; | |
3444 | } | |
3445 | spin_unlock_bh(&sk->sk_write_queue.lock); | |
1da177e4 LT |
3446 | return mask; |
3447 | } | |
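From userspace the contract above reads as: POLLIN means the most recent rx slot is no longer TP_STATUS_KERNEL, POLLOUT means the current tx slot is TP_STATUS_AVAILABLE. A hedged TPACKET_V2 consumer sketch; ring, frame_nr and frame_size are assumed to come from the caller's tpacket_req and mmap, and memory barriers are omitted for brevity:

#include <poll.h>
#include <linux/if_packet.h>

static void drain_rx_ring(int fd, char *ring,
			  unsigned int frame_nr, unsigned int frame_size)
{
	static unsigned int head;
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };
	struct tpacket2_hdr *hdr;

	poll(&pfd, 1, -1);
	hdr = (struct tpacket2_hdr *)(ring + head * frame_size);
	while (hdr->tp_status & TP_STATUS_USER) {
		/* ... consume the frame at (char *)hdr + hdr->tp_mac ... */
		hdr->tp_status = TP_STATUS_KERNEL;	/* hand slot back */
		head = (head + 1) % frame_nr;
		hdr = (struct tpacket2_hdr *)(ring + head * frame_size);
	}
}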
3448 | ||
3449 | ||
3450 | /* Dirty? Well, I still have not found a better way to account
3451 | * for user mmaps. | |
3452 | */ | |
3453 | ||
3454 | static void packet_mm_open(struct vm_area_struct *vma) | |
3455 | { | |
3456 | struct file *file = vma->vm_file; | |
40d4e3df | 3457 | struct socket *sock = file->private_data; |
1da177e4 | 3458 | struct sock *sk = sock->sk; |
1ce4f28b | 3459 | |
1da177e4 LT |
3460 | if (sk) |
3461 | atomic_inc(&pkt_sk(sk)->mapped); | |
3462 | } | |
3463 | ||
3464 | static void packet_mm_close(struct vm_area_struct *vma) | |
3465 | { | |
3466 | struct file *file = vma->vm_file; | |
40d4e3df | 3467 | struct socket *sock = file->private_data; |
1da177e4 | 3468 | struct sock *sk = sock->sk; |
1ce4f28b | 3469 | |
1da177e4 LT |
3470 | if (sk) |
3471 | atomic_dec(&pkt_sk(sk)->mapped); | |
3472 | } | |
3473 | ||
f0f37e2f | 3474 | static const struct vm_operations_struct packet_mmap_ops = { |
40d4e3df ED |
3475 | .open = packet_mm_open, |
3476 | .close = packet_mm_close, | |
1da177e4 LT |
3477 | }; |
3478 | ||
0e3125c7 NH |
3479 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
3480 | unsigned int len) | |
1da177e4 LT |
3481 | { |
3482 | int i; | |
3483 | ||
4ebf0ae2 | 3484 | for (i = 0; i < len; i++) { |
0e3125c7 | 3485 | if (likely(pg_vec[i].buffer)) { |
c56b4d90 | 3486 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
0e3125c7 NH |
3487 | vfree(pg_vec[i].buffer); |
3488 | else | |
3489 | free_pages((unsigned long)pg_vec[i].buffer, | |
3490 | order); | |
3491 | pg_vec[i].buffer = NULL; | |
3492 | } | |
1da177e4 LT |
3493 | } |
3494 | kfree(pg_vec); | |
3495 | } | |
3496 | ||
c56b4d90 | 3497 | static inline char *alloc_one_pg_vec_page(unsigned long order) |
4ebf0ae2 | 3498 | { |
0e3125c7 NH |
3499 | char *buffer = NULL; |
3500 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | | |
3501 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; | |
3502 | ||
3503 | buffer = (char *) __get_free_pages(gfp_flags, order); | |
3504 | ||
3505 | if (buffer) | |
3506 | return buffer; | |
3507 | ||
3508 | /* | |
3509 | * __get_free_pages failed, fall back to vmalloc | |
3510 | */ | |
bbce5a59 | 3511 | buffer = vzalloc((1 << order) * PAGE_SIZE); |
719bfeaa | 3512 | |
0e3125c7 NH |
3513 | if (buffer) |
3514 | return buffer; | |
3515 | ||
3516 | /* | |
3517 | * vmalloc failed too; let's dig into swap here
3518 | */ | |
0e3125c7 NH |
3519 | gfp_flags &= ~__GFP_NORETRY; |
3520 | buffer = (char *)__get_free_pages(gfp_flags, order); | |
3521 | if (buffer) | |
3522 | return buffer; | |
3523 | ||
3524 | /* | |
3525 | * complete and utter failure | |
3526 | */ | |
3527 | return NULL; | |
4ebf0ae2 DM |
3528 | } |
3529 | ||
0e3125c7 | 3530 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
4ebf0ae2 DM |
3531 | { |
3532 | unsigned int block_nr = req->tp_block_nr; | |
0e3125c7 | 3533 | struct pgv *pg_vec; |
4ebf0ae2 DM |
3534 | int i; |
3535 | ||
0e3125c7 | 3536 | pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); |
4ebf0ae2 DM |
3537 | if (unlikely(!pg_vec)) |
3538 | goto out; | |
3539 | ||
3540 | for (i = 0; i < block_nr; i++) { | |
c56b4d90 | 3541 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
0e3125c7 | 3542 | if (unlikely(!pg_vec[i].buffer)) |
4ebf0ae2 DM |
3543 | goto out_free_pgvec; |
3544 | } | |
3545 | ||
3546 | out: | |
3547 | return pg_vec; | |
3548 | ||
3549 | out_free_pgvec: | |
3550 | free_pg_vec(pg_vec, order, block_nr); | |
3551 | pg_vec = NULL; | |
3552 | goto out; | |
3553 | } | |
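A worked example of the geometry (hypothetical values): with 4 KiB pages, tp_block_size = 64 KiB gives get_order(65536) == 4, so each pg_vec entry above is one 16-page block, physically contiguous if the page allocator succeeds and vmalloc'ed otherwise. The total ring is simply block_size * block_nr:

/* sketch, not from this file: size the mapping a ring will need */
static unsigned long ring_bytes(unsigned int tp_block_size,
				unsigned int tp_block_nr)
{
	/* one pg_vec entry per block; packet_mmap() below expects the
	 * vma to cover exactly this many bytes per configured ring */
	return (unsigned long)tp_block_size * tp_block_nr;
}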
1da177e4 | 3554 | |
f6fb8f10 | 3555 | static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
69e3c75f | 3556 | int closing, int tx_ring) |
1da177e4 | 3557 | { |
0e3125c7 | 3558 | struct pgv *pg_vec = NULL; |
1da177e4 | 3559 | struct packet_sock *po = pkt_sk(sk); |
0e11c91e | 3560 | int was_running, order = 0; |
69e3c75f JB |
3561 | struct packet_ring_buffer *rb; |
3562 | struct sk_buff_head *rb_queue; | |
0e11c91e | 3563 | __be16 num; |
f6fb8f10 | 3564 | int err = -EINVAL; |
3565 | /* Alias kept to minimize code churn */
3566 | struct tpacket_req *req = &req_u->req; | |
3567 | ||
3568 | /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ | |
3569 | if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { | |
3570 | WARN(1, "Tx-ring is not supported.\n"); | |
3571 | goto out; | |
3572 | } | |
1ce4f28b | 3573 | |
69e3c75f JB |
3574 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; |
3575 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; | |
1da177e4 | 3576 | |
69e3c75f JB |
3577 | err = -EBUSY; |
3578 | if (!closing) { | |
3579 | if (atomic_read(&po->mapped)) | |
3580 | goto out; | |
3581 | if (atomic_read(&rb->pending)) | |
3582 | goto out; | |
3583 | } | |
1da177e4 | 3584 | |
69e3c75f JB |
3585 | if (req->tp_block_nr) { |
3586 | /* Sanity tests and some calculations */ | |
3587 | err = -EBUSY; | |
3588 | if (unlikely(rb->pg_vec)) | |
3589 | goto out; | |
1da177e4 | 3590 | |
bbd6ef87 PM |
3591 | switch (po->tp_version) { |
3592 | case TPACKET_V1: | |
3593 | po->tp_hdrlen = TPACKET_HDRLEN; | |
3594 | break; | |
3595 | case TPACKET_V2: | |
3596 | po->tp_hdrlen = TPACKET2_HDRLEN; | |
3597 | break; | |
f6fb8f10 | 3598 | case TPACKET_V3: |
3599 | po->tp_hdrlen = TPACKET3_HDRLEN; | |
3600 | break; | |
bbd6ef87 PM |
3601 | } |
3602 | ||
69e3c75f | 3603 | err = -EINVAL; |
4ebf0ae2 | 3604 | if (unlikely((int)req->tp_block_size <= 0)) |
69e3c75f | 3605 | goto out; |
4ebf0ae2 | 3606 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
69e3c75f | 3607 | goto out; |
8913336a | 3608 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
69e3c75f JB |
3609 | po->tp_reserve)) |
3610 | goto out; | |
4ebf0ae2 | 3611 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) |
69e3c75f | 3612 | goto out; |
1da177e4 | 3613 | |
69e3c75f JB |
3614 | rb->frames_per_block = req->tp_block_size/req->tp_frame_size; |
3615 | if (unlikely(rb->frames_per_block <= 0)) | |
3616 | goto out; | |
3617 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != | |
3618 | req->tp_frame_nr)) | |
3619 | goto out; | |
1da177e4 LT |
3620 | |
3621 | err = -ENOMEM; | |
4ebf0ae2 DM |
3622 | order = get_order(req->tp_block_size); |
3623 | pg_vec = alloc_pg_vec(req, order); | |
3624 | if (unlikely(!pg_vec)) | |
1da177e4 | 3625 | goto out; |
f6fb8f10 | 3626 | switch (po->tp_version) { |
3627 | case TPACKET_V3: | |
3628 | /* Transmit path is not supported. We checked
3629 | * it above, but stay paranoid and check again.
3630 | */ | |
3631 | if (!tx_ring) | |
3632 | init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); | |
3633 | break; | |
3634 | default: | |
3635 | break; | |
3636 | } | |
69e3c75f JB |
3637 | } else {
3638 | /* Done: tp_block_nr == 0, the caller is tearing the ring down */
3639 |
3640 | err = -EINVAL; | |
4ebf0ae2 | 3641 | if (unlikely(req->tp_frame_nr)) |
69e3c75f | 3642 | goto out; |
1da177e4 LT |
3643 | } |
3644 | ||
3645 | lock_sock(sk); | |
3646 | ||
3647 | /* Detach socket from network */ | |
3648 | spin_lock(&po->bind_lock); | |
3649 | was_running = po->running; | |
3650 | num = po->num; | |
3651 | if (was_running) { | |
1da177e4 | 3652 | po->num = 0; |
ce06b03e | 3653 | __unregister_prot_hook(sk, false); |
1da177e4 LT |
3654 | } |
3655 | spin_unlock(&po->bind_lock); | |
1ce4f28b | 3656 | |
1da177e4 LT |
3657 | synchronize_net(); |
3658 | ||
3659 | err = -EBUSY; | |
905db440 | 3660 | mutex_lock(&po->pg_vec_lock); |
1da177e4 LT |
3661 | if (closing || atomic_read(&po->mapped) == 0) { |
3662 | err = 0; | |
69e3c75f | 3663 | spin_lock_bh(&rb_queue->lock); |
c053fd96 | 3664 | swap(rb->pg_vec, pg_vec); |
69e3c75f JB |
3665 | rb->frame_max = (req->tp_frame_nr - 1); |
3666 | rb->head = 0; | |
3667 | rb->frame_size = req->tp_frame_size; | |
3668 | spin_unlock_bh(&rb_queue->lock); | |
3669 | ||
c053fd96 CG |
3670 | swap(rb->pg_vec_order, order); |
3671 | swap(rb->pg_vec_len, req->tp_block_nr); | |
69e3c75f JB |
3672 | |
3673 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; | |
3674 | po->prot_hook.func = (po->rx_ring.pg_vec) ? | |
3675 | tpacket_rcv : packet_rcv; | |
3676 | skb_queue_purge(rb_queue); | |
1da177e4 | 3677 | if (atomic_read(&po->mapped)) |
40d4e3df ED |
3678 | pr_err("packet_mmap: vma is busy: %d\n", |
3679 | atomic_read(&po->mapped)); | |
1da177e4 | 3680 | } |
905db440 | 3681 | mutex_unlock(&po->pg_vec_lock); |
1da177e4 LT |
3682 | |
3683 | spin_lock(&po->bind_lock); | |
ce06b03e | 3684 | if (was_running) { |
1da177e4 | 3685 | po->num = num; |
ce06b03e | 3686 | register_prot_hook(sk); |
1da177e4 LT |
3687 | } |
3688 | spin_unlock(&po->bind_lock); | |
f6fb8f10 | 3689 | if (closing && (po->tp_version > TPACKET_V2)) { |
3690 | /* Because block-based V3 is not supported on the tx ring */
3691 | if (!tx_ring) | |
3692 | prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue); | |
3693 | } | |
1da177e4 LT |
3694 | release_sock(sk); |
3695 | ||
1da177e4 LT |
3696 | if (pg_vec) |
3697 | free_pg_vec(pg_vec, order, req->tp_block_nr); | |
3698 | out: | |
3699 | return err; | |
3700 | } | |
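A hedged userspace sketch of a request that satisfies every sanity test above, assuming 4 KiB pages: block size a multiple of PAGE_SIZE, frame size a multiple of TPACKET_ALIGNMENT and large enough for the header plus reserve, and tp_frame_nr equal to frames_per_block * tp_block_nr.

#include <sys/socket.h>
#include <linux/if_packet.h>

static int setup_rx_ring(int fd)
{
	struct tpacket_req req = {
		.tp_block_size = 1 << 16,	/* 64 KiB, PAGE_SIZE aligned */
		.tp_block_nr   = 8,
		.tp_frame_size = 1 << 11,	/* 2 KiB, TPACKET_ALIGNMENT aligned */
		.tp_frame_nr   = ((1 << 16) / (1 << 11)) * 8,	/* 32 * 8 = 256 */
	};

	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
}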
3701 | ||
69e3c75f JB |
3702 | static int packet_mmap(struct file *file, struct socket *sock, |
3703 | struct vm_area_struct *vma) | |
1da177e4 LT |
3704 | { |
3705 | struct sock *sk = sock->sk; | |
3706 | struct packet_sock *po = pkt_sk(sk); | |
69e3c75f JB |
3707 | unsigned long size, expected_size; |
3708 | struct packet_ring_buffer *rb; | |
1da177e4 LT |
3709 | unsigned long start; |
3710 | int err = -EINVAL; | |
3711 | int i; | |
3712 | ||
3713 | if (vma->vm_pgoff) | |
3714 | return -EINVAL; | |
3715 | ||
905db440 | 3716 | mutex_lock(&po->pg_vec_lock); |
69e3c75f JB |
3717 | |
3718 | expected_size = 0; | |
3719 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { | |
3720 | if (rb->pg_vec) { | |
3721 | expected_size += rb->pg_vec_len | |
3722 | * rb->pg_vec_pages | |
3723 | * PAGE_SIZE; | |
3724 | } | |
3725 | } | |
3726 | ||
3727 | if (expected_size == 0) | |
1da177e4 | 3728 | goto out; |
69e3c75f JB |
3729 | |
3730 | size = vma->vm_end - vma->vm_start; | |
3731 | if (size != expected_size) | |
1da177e4 LT |
3732 | goto out; |
3733 | ||
1da177e4 | 3734 | start = vma->vm_start; |
69e3c75f JB |
3735 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { |
3736 | if (rb->pg_vec == NULL) | |
3737 | continue; | |
3738 | ||
3739 | for (i = 0; i < rb->pg_vec_len; i++) { | |
0e3125c7 NH |
3740 | struct page *page; |
3741 | void *kaddr = rb->pg_vec[i].buffer; | |
69e3c75f JB |
3742 | int pg_num; |
3743 | ||
c56b4d90 CG |
3744 | for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { |
3745 | page = pgv_to_page(kaddr); | |
69e3c75f JB |
3746 | err = vm_insert_page(vma, start, page); |
3747 | if (unlikely(err)) | |
3748 | goto out; | |
3749 | start += PAGE_SIZE; | |
0e3125c7 | 3750 | kaddr += PAGE_SIZE; |
69e3c75f | 3751 | } |
4ebf0ae2 | 3752 | } |
1da177e4 | 3753 | } |
69e3c75f | 3754 | |
4ebf0ae2 | 3755 | atomic_inc(&po->mapped); |
1da177e4 LT |
3756 | vma->vm_ops = &packet_mmap_ops; |
3757 | err = 0; | |
3758 | ||
3759 | out: | |
905db440 | 3760 | mutex_unlock(&po->pg_vec_lock); |
1da177e4 LT |
3761 | return err; |
3762 | } | |
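Userspace must map the ring with offset 0 (the vm_pgoff check above) and a length that exactly matches the sum of the configured rings; anything else fails with -EINVAL. A sketch for a socket with only an rx ring configured:

#include <sys/mman.h>
#include <linux/if_packet.h>

static void *map_ring(int fd, const struct tpacket_req *req)
{
	size_t sz = (size_t)req->tp_block_size * req->tp_block_nr;

	/* one contiguous mapping covering rx (and tx, if configured) */
	return mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}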
1da177e4 | 3763 | |
90ddc4f0 | 3764 | static const struct proto_ops packet_ops_spkt = { |
1da177e4 LT |
3765 | .family = PF_PACKET, |
3766 | .owner = THIS_MODULE, | |
3767 | .release = packet_release, | |
3768 | .bind = packet_bind_spkt, | |
3769 | .connect = sock_no_connect, | |
3770 | .socketpair = sock_no_socketpair, | |
3771 | .accept = sock_no_accept, | |
3772 | .getname = packet_getname_spkt, | |
3773 | .poll = datagram_poll, | |
3774 | .ioctl = packet_ioctl, | |
3775 | .listen = sock_no_listen, | |
3776 | .shutdown = sock_no_shutdown, | |
3777 | .setsockopt = sock_no_setsockopt, | |
3778 | .getsockopt = sock_no_getsockopt, | |
3779 | .sendmsg = packet_sendmsg_spkt, | |
3780 | .recvmsg = packet_recvmsg, | |
3781 | .mmap = sock_no_mmap, | |
3782 | .sendpage = sock_no_sendpage, | |
3783 | }; | |
1da177e4 | 3784 | |
90ddc4f0 | 3785 | static const struct proto_ops packet_ops = { |
1da177e4 LT |
3786 | .family = PF_PACKET, |
3787 | .owner = THIS_MODULE, | |
3788 | .release = packet_release, | |
3789 | .bind = packet_bind, | |
3790 | .connect = sock_no_connect, | |
3791 | .socketpair = sock_no_socketpair, | |
3792 | .accept = sock_no_accept, | |
1ce4f28b | 3793 | .getname = packet_getname, |
1da177e4 LT |
3794 | .poll = packet_poll, |
3795 | .ioctl = packet_ioctl, | |
3796 | .listen = sock_no_listen, | |
3797 | .shutdown = sock_no_shutdown, | |
3798 | .setsockopt = packet_setsockopt, | |
3799 | .getsockopt = packet_getsockopt, | |
3800 | .sendmsg = packet_sendmsg, | |
3801 | .recvmsg = packet_recvmsg, | |
3802 | .mmap = packet_mmap, | |
3803 | .sendpage = sock_no_sendpage, | |
3804 | }; | |
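Which table a socket uses is decided at creation time: SOCK_PACKET sockets get packet_ops_spkt, while SOCK_RAW and SOCK_DGRAM get packet_ops. A minimal sketch of the usual entry point (requires CAP_NET_RAW):

#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

int open_packet_socket(void)
{
	/* dispatches through packet_ops above */
	return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}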
3805 | ||
ec1b4cf7 | 3806 | static const struct net_proto_family packet_family_ops = { |
1da177e4 LT |
3807 | .family = PF_PACKET, |
3808 | .create = packet_create, | |
3809 | .owner = THIS_MODULE, | |
3810 | }; | |
3811 | ||
3812 | static struct notifier_block packet_netdev_notifier = { | |
40d4e3df | 3813 | .notifier_call = packet_notifier, |
1da177e4 LT |
3814 | }; |
3815 | ||
3816 | #ifdef CONFIG_PROC_FS | |
1da177e4 LT |
3817 | |
3818 | static void *packet_seq_start(struct seq_file *seq, loff_t *pos) | |
808f5114 | 3819 | __acquires(RCU) |
1da177e4 | 3820 | { |
e372c414 | 3821 | struct net *net = seq_file_net(seq); |
808f5114 | 3822 | |
3823 | rcu_read_lock(); | |
3824 | return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); | |
1da177e4 LT |
3825 | } |
3826 | ||
3827 | static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
3828 | { | |
1bf40954 | 3829 | struct net *net = seq_file_net(seq); |
808f5114 | 3830 | return seq_hlist_next_rcu(v, &net->packet.sklist, pos); |
1da177e4 LT |
3831 | } |
3832 | ||
3833 | static void packet_seq_stop(struct seq_file *seq, void *v) | |
808f5114 | 3834 | __releases(RCU) |
1da177e4 | 3835 | { |
808f5114 | 3836 | rcu_read_unlock(); |
1da177e4 LT |
3837 | } |
3838 | ||
1ce4f28b | 3839 | static int packet_seq_show(struct seq_file *seq, void *v) |
1da177e4 LT |
3840 | { |
3841 | if (v == SEQ_START_TOKEN) | |
3842 | seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); | |
3843 | else { | |
b7ceabd9 | 3844 | struct sock *s = sk_entry(v); |
1da177e4 LT |
3845 | const struct packet_sock *po = pkt_sk(s); |
3846 | ||
3847 | seq_printf(seq, | |
71338aa7 | 3848 | "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", |
1da177e4 LT |
3849 | s, |
3850 | atomic_read(&s->sk_refcnt), | |
3851 | s->sk_type, | |
3852 | ntohs(po->num), | |
3853 | po->ifindex, | |
3854 | po->running, | |
3855 | atomic_read(&s->sk_rmem_alloc), | |
3856 | sock_i_uid(s), | |
40d4e3df | 3857 | sock_i_ino(s)); |
1da177e4 LT |
3858 | } |
3859 | ||
3860 | return 0; | |
3861 | } | |
3862 | ||
56b3d975 | 3863 | static const struct seq_operations packet_seq_ops = { |
1da177e4 LT |
3864 | .start = packet_seq_start, |
3865 | .next = packet_seq_next, | |
3866 | .stop = packet_seq_stop, | |
3867 | .show = packet_seq_show, | |
3868 | }; | |
3869 | ||
3870 | static int packet_seq_open(struct inode *inode, struct file *file) | |
3871 | { | |
e372c414 DL |
3872 | return seq_open_net(inode, file, &packet_seq_ops, |
3873 | sizeof(struct seq_net_private)); | |
1da177e4 LT |
3874 | } |
3875 | ||
da7071d7 | 3876 | static const struct file_operations packet_seq_fops = { |
1da177e4 LT |
3877 | .owner = THIS_MODULE, |
3878 | .open = packet_seq_open, | |
3879 | .read = seq_read, | |
3880 | .llseek = seq_lseek, | |
e372c414 | 3881 | .release = seq_release_net, |
1da177e4 LT |
3882 | }; |
3883 | ||
3884 | #endif | |
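The proc interface above is read like any text file; each line follows the format string in packet_seq_show (socket pointer, refcount, type, protocol, ifindex, running flag, rmem, uid, inode). A trivial reader sketch:

#include <stdio.h>

static void dump_packet_sockets(void)
{
	FILE *f = fopen("/proc/net/packet", "r");
	char line[256];

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}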
3885 | ||
2c8c1e72 | 3886 | static int __net_init packet_net_init(struct net *net) |
d12d01d6 | 3887 | { |
808f5114 | 3888 | spin_lock_init(&net->packet.sklist_lock); |
2aaef4e4 | 3889 | INIT_HLIST_HEAD(&net->packet.sklist); |
d12d01d6 DL |
3890 | |
3891 | if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) | |
3892 | return -ENOMEM; | |
3893 | ||
3894 | return 0; | |
3895 | } | |
3896 | ||
2c8c1e72 | 3897 | static void __net_exit packet_net_exit(struct net *net) |
d12d01d6 DL |
3898 | { |
3899 | proc_net_remove(net, "packet"); | |
3900 | } | |
3901 | ||
3902 | static struct pernet_operations packet_net_ops = { | |
3903 | .init = packet_net_init, | |
3904 | .exit = packet_net_exit, | |
3905 | }; | |
3906 | ||
3907 | ||
1da177e4 LT |
3908 | static void __exit packet_exit(void) |
3909 | { | |
1da177e4 | 3910 | unregister_netdevice_notifier(&packet_netdev_notifier); |
d12d01d6 | 3911 | unregister_pernet_subsys(&packet_net_ops); |
1da177e4 LT |
3912 | sock_unregister(PF_PACKET); |
3913 | proto_unregister(&packet_proto); | |
3914 | } | |
3915 | ||
3916 | static int __init packet_init(void) | |
3917 | { | |
3918 | int rc = proto_register(&packet_proto, 0); | |
3919 | ||
3920 | if (rc != 0) | |
3921 | goto out; | |
3922 | ||
3923 | sock_register(&packet_family_ops); | |
d12d01d6 | 3924 | register_pernet_subsys(&packet_net_ops); |
1da177e4 | 3925 | register_netdevice_notifier(&packet_netdev_notifier); |
1da177e4 LT |
3926 | out: |
3927 | return rc; | |
3928 | } | |
3929 | ||
3930 | module_init(packet_init); | |
3931 | module_exit(packet_exit); | |
3932 | MODULE_LICENSE("GPL"); | |
3933 | MODULE_ALIAS_NETPROTO(PF_PACKET); |