/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall, <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif

struct vlan_group;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

#define HAVE_ALLOC_NETDEV	/* feature macro: alloc_xxxdev
				   functions are available. */
#define HAVE_FREE_NETDEV	/* free_netdev() */
#define HAVE_NETDEV_PRIV	/* netdev_priv() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

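/*
 * Illustrative sketch (not part of this header): the usual shape of a
 * driver hard_start_xmit() that sticks to the driver transmit return
 * codes described above.  The my_* helpers are assumptions.
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (my_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;	(the stack will retry)
 *		}
 *		my_queue_to_hw(priv, skb);	(skb now owned by the device)
 *		return NETDEV_TX_OK;
 *	}
 */
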
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

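/*
 * Example (illustrative): net_xmit_eval() lets a caller fold congestion
 * notification into success when reporting an enqueue result upward:
 *
 *	rc = q->enqueue(skb, q);	(a qdisc ->enqueue() call)
 *	return net_xmit_eval(rc);	(0 for NET_XMIT_SUCCESS and NET_XMIT_CN)
 */
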
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

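/*
 * Example (illustrative): a hypothetical transmit path using
 * dev_xmit_complete() to decide whether it still owns the skb:
 *
 *	rc = ops->ndo_start_xmit(skb, dev);
 *	if (dev_xmit_complete(rc))
 *		skb = NULL;	(driver consumed it; do not touch it again)
 *	else
 *		...		(e.g. NETDEV_TX_BUSY: requeue and retry later)
 */
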
#endif

#define MAX_ADDR_LEN	32	/* Largest hardware address length */

#ifdef __KERNEL__
/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif /* __KERNEL__ */

/*
 * Network device statistics. Akin to the 2.0 ether stats but
 * with byte counters.
 */

struct net_device_stats {
	unsigned long	rx_packets;	/* total packets received	*/
	unsigned long	tx_packets;	/* total packets transmitted	*/
	unsigned long	rx_bytes;	/* total bytes received		*/
	unsigned long	tx_bytes;	/* total bytes transmitted	*/
	unsigned long	rx_errors;	/* bad packets received		*/
	unsigned long	tx_errors;	/* packet transmit problems	*/
	unsigned long	rx_dropped;	/* no space in linux buffers	*/
	unsigned long	tx_dropped;	/* no space available in linux	*/
	unsigned long	multicast;	/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error	*/
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats {
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list {
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
	int			refcount;
	bool			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_uc_count(dev) ((dev)->uc.count)
#define netdev_uc_empty(dev) ((dev)->uc.count == 0)
#define netdev_for_each_uc_addr(ha, dev) \
	list_for_each_entry(ha, &dev->uc.list, list)

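/*
 * Example (illustrative): walking the secondary unicast list, e.g. to
 * program hardware filters; this is typically done from ndo_set_rx_mode,
 * where dev->addr_list_lock is held.  my_hw_add_filter() is an assumption.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_uc_addr(ha, dev)
 *		my_hw_add_filter(dev, ha->addr);
 */
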
struct hh_cache {
	struct hh_cache *hh_next;	/* Next entry			*/
	atomic_t	hh_refcnt;	/* number of users		*/
	/*
	 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
	 * cache line on SMP.
	 * They are mostly read, but hh_refcnt may be changed quite frequently,
	 * incurring cache line ping pongs.
	 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
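
/*
 * Worked example of the alignment arithmetic above (illustrative):
 * with HH_DATA_MOD = 16 and a 14-byte Ethernet header,
 * HH_DATA_OFF(14) = 16 - ((13 & 15) + 1) = 2 and HH_DATA_ALIGN(14) = 16,
 * so the cached header is stored at offset 2 of a 16-byte slot and ends
 * on a 16-byte boundary.
 */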

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

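/*
 * Example (illustrative): the usual pattern in protocol code for
 * allocating an skb with the head- and tailroom the device asks for;
 * payload_len stands in for the caller's data length:
 *
 *	skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */
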
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

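/*
 * Example (illustrative): a hypothetical interrupt handler deferring rx
 * processing to NAPI; my_irq_disable() is an assumption.
 *
 *	static irqreturn_t my_interrupt(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			my_irq_disable(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */
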
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

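/*
 * Example (illustrative): the canonical shape of a NAPI poll routine;
 * my_rx_clean() and my_irq_enable() are assumptions.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work_done = my_rx_clean(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			my_irq_enable(priv);
 *		}
 *		return work_done;
 *	}
 */
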
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Allow NAPI to be scheduled on this context again.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
};

struct netdev_queue {
	/*
	 * read mostly part
	 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	struct Qdisc		*qdisc_sleeping;
	/*
	 * write mostly part
	 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;
	unsigned long		tx_bytes;
	unsigned long		tx_packets;
	unsigned long		tx_dropped;
} ____cacheline_aligned_in_smp;


/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when the device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when the device supports
 *	multiple transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous mode is
 *	enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering.
 *
 * void (*ndo_set_multicast_list)(struct net_device *dev);
 *	This function is called when the multicast address list changes.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. If not defined, the counters in dev->stats will
 *	be used.
 *
 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 *	If the device supports VLAN receive acceleration
 *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
 *	when the vlan groups for the device change. Note: grp is NULL
 *	if no vlan groups are being used.
 *
 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is registered.
 *
 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 */
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	void			(*ndo_set_multicast_list)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	void			(*ndo_vlan_rx_register)(struct net_device *dev,
							struct vlan_group *grp);
	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif
};

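/*
 * Example (illustrative): a minimal ops table for a hypothetical
 * Ethernet driver; the my_* handlers are assumptions, while the eth_*
 * helpers come from <linux/etherdevice.h>.
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 *
 * assigned with "dev->netdev_ops = &my_netdev_ops;" before
 * register_netdev().
 */
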
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;
	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,.. */
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_GRO		16384	/* Generic receive offload */
#define NETIF_F_LRO		32768	/* large receive offload */

/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0x00ff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	/*
	 * If one device supports one of these features, then enable them
	 * for all in netdev_increment_features.
	 */
#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
				 NETIF_F_SG | NETIF_F_HIGHDMA | \
				 NETIF_F_FRAGLIST)

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct netdev_hw_addr_list	uc;	/* Secondary unicast
						   mac addresses */
	int			uc_promisc;
	spinlock_t		addr_list_lock;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#ifdef CONFIG_NET_DSA
	void			*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

	/*
	 * Cache line mostly used on receive path (including eth_type_trans())
	 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr */

	struct netdev_queue	rx_queue;

	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
	/*
	 * One part is mostly used on xmit path (device)
	 */
	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;
	/* GARP */
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return 0;
}

#ifndef CONFIG_NET_NS
static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
}
#else /* CONFIG_NET_NS */
void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
#endif

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return 0;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

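/*
 * Example (illustrative): the usual pairing of alloc_etherdev() (declared
 * in <linux/etherdevice.h>) with netdev_priv(); struct my_priv is an
 * assumption:
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (dev) {
 *		struct my_priv *priv = netdev_priv(dev);
 *		priv->dev = dev;
 *	}
 */
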
/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, a symlink is created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;

	/* Free the skb? */
	int free;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

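/*
 * Example (illustrative): walking every device in a namespace under
 * dev_base_lock; the _rcu variants may instead be used under
 * rcu_read_lock():
 *
 *	struct net_device *d;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, d)
 *		printk(KERN_DEBUG "%s\n", d->name);
 *	read_unlock(&dev_base_lock);
 */
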
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(dev->dev_list.next);
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
					   unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice_queue(struct net_device *dev,
						   struct list_head *head);
extern void		unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		init_dummy_netdev(struct net_device *dev);
extern void		netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
86911732 HX |
1200 | extern int skb_gro_receive(struct sk_buff **head, |
1201 | struct sk_buff *skb); | |
78a478d0 | 1202 | extern void skb_gro_reset_offset(struct sk_buff *skb); |
86911732 HX |
1203 | |
1204 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | |
1205 | { | |
1206 | return NAPI_GRO_CB(skb)->data_offset; | |
1207 | } | |
1208 | ||
1209 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | |
1210 | { | |
1211 | return skb->len - NAPI_GRO_CB(skb)->data_offset; | |
1212 | } | |
1213 | ||
1214 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | |
1215 | { | |
1216 | NAPI_GRO_CB(skb)->data_offset += len; | |
1217 | } | |
1218 | ||
a5b1cf28 HX |
1219 | static inline void *skb_gro_header_fast(struct sk_buff *skb, |
1220 | unsigned int offset) | |
86911732 | 1221 | { |
a5b1cf28 HX |
1222 | return NAPI_GRO_CB(skb)->frag0 + offset; |
1223 | } | |
78a478d0 | 1224 | |
a5b1cf28 HX |
1225 | static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) |
1226 | { | |
1227 | return NAPI_GRO_CB(skb)->frag0_len < hlen; | |
1228 | } | |
78a478d0 | 1229 | |
a5b1cf28 HX |
1230 | static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, |
1231 | unsigned int offset) | |
1232 | { | |
1233 | NAPI_GRO_CB(skb)->frag0 = NULL; | |
1234 | NAPI_GRO_CB(skb)->frag0_len = 0; | |
1235 | return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL; | |
86911732 | 1236 | } |
1da177e4 | 1237 | |
aa4b9f53 HX |
1238 | static inline void *skb_gro_mac_header(struct sk_buff *skb) |
1239 | { | |
78d3fd0b | 1240 | return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); |
aa4b9f53 HX |
1241 | } |
1242 | ||
36e7b1b8 HX |
1243 | static inline void *skb_gro_network_header(struct sk_buff *skb) |
1244 | { | |
78d3fd0b HX |
1245 | return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + |
1246 | skb_network_offset(skb); | |
36e7b1b8 HX |
1247 | } |
1248 | ||
0c4e8581 SH |
1249 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
1250 | unsigned short type, | |
3b04ddde SH |
1251 | const void *daddr, const void *saddr, |
1252 | unsigned len) | |
0c4e8581 | 1253 | { |
f1ecfd5d | 1254 | if (!dev->header_ops || !dev->header_ops->create) |
0c4e8581 | 1255 | return 0; |
3b04ddde SH |
1256 | |
1257 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | |
0c4e8581 SH |
1258 | } |
1259 | ||
b95cce35 SH |
1260 | static inline int dev_parse_header(const struct sk_buff *skb, |
1261 | unsigned char *haddr) | |
1262 | { | |
1263 | const struct net_device *dev = skb->dev; | |
1264 | ||
1b83336b | 1265 | if (!dev->header_ops || !dev->header_ops->parse) |
b95cce35 | 1266 | return 0; |
3b04ddde | 1267 | return dev->header_ops->parse(skb, haddr); |
b95cce35 SH |
1268 | } |
1269 | ||
1da177e4 LT |
1270 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); |
1271 | extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); | |
1272 | static inline int unregister_gifconf(unsigned int family) | |
1273 | { | |
1274 | return register_gifconf(family, NULL); | |
1275 | } | |
1276 | ||
1277 | /* | |
1278 | * Incoming packets are placed on per-cpu queues so that | |
1279 | * no locking is needed. | |
1280 | */ | |
d94d9fee | 1281 | struct softnet_data { |
37437bb2 | 1282 | struct Qdisc *output_queue; |
1da177e4 LT |
1283 | struct sk_buff_head input_pkt_queue; |
1284 | struct list_head poll_list; | |
1da177e4 LT |
1285 | struct sk_buff *completion_queue; |
1286 | ||
bea3348e | 1287 | struct napi_struct backlog; |
1da177e4 LT |
1288 | }; |
1289 | ||
1290 | DECLARE_PER_CPU(struct softnet_data,softnet_data); | |
1291 | ||
1292 | #define HAVE_NETIF_QUEUE | |
1293 | ||
37437bb2 | 1294 | extern void __netif_schedule(struct Qdisc *q); |
1da177e4 | 1295 | |
86d804e1 | 1296 | static inline void netif_schedule_queue(struct netdev_queue *txq) |
1da177e4 | 1297 | { |
79d16385 | 1298 | if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) |
37437bb2 | 1299 | __netif_schedule(txq->qdisc); |
86d804e1 DM |
1300 | } |
1301 | ||
fd2ea0a7 DM |
1302 | static inline void netif_tx_schedule_all(struct net_device *dev) |
1303 | { | |
1304 | unsigned int i; | |
1305 | ||
1306 | for (i = 0; i < dev->num_tx_queues; i++) | |
1307 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); | |
1308 | } | |
1309 | ||
d29f749e DJ |
1310 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) |
1311 | { | |
1312 | clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | |
1313 | } | |
1314 | ||
bea3348e SH |
1315 | /** |
1316 | * netif_start_queue - allow transmit | |
1317 | * @dev: network device | |
1318 | * | |
1319 | * Allow upper layers to call the device hard_start_xmit routine. | |
1320 | */ | |
1da177e4 LT |
1321 | static inline void netif_start_queue(struct net_device *dev) |
1322 | { | |
e8a0464c | 1323 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
1324 | } |
1325 | ||
fd2ea0a7 DM |
1326 | static inline void netif_tx_start_all_queues(struct net_device *dev) |
1327 | { | |
1328 | unsigned int i; | |
1329 | ||
1330 | for (i = 0; i < dev->num_tx_queues; i++) { | |
1331 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
1332 | netif_tx_start_queue(txq); | |
1333 | } | |
1334 | } | |
1335 | ||
79d16385 | 1336 | static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) |
1da177e4 LT |
1337 | { |
1338 | #ifdef CONFIG_NETPOLL_TRAP | |
5f286e11 | 1339 | if (netpoll_trap()) { |
7b3d3e4f | 1340 | netif_tx_start_queue(dev_queue); |
1da177e4 | 1341 | return; |
5f286e11 | 1342 | } |
1da177e4 | 1343 | #endif |
79d16385 | 1344 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) |
37437bb2 | 1345 | __netif_schedule(dev_queue->qdisc); |
79d16385 DM |
1346 | } |
1347 | ||
d29f749e DJ |
1348 | /** |
1349 | * netif_wake_queue - restart transmit | |
1350 | * @dev: network device | |
1351 | * | |
1352 | * Allow upper layers to call the device hard_start_xmit routine. | |
1353 | * Used for flow control when transmit resources are available. | |
1354 | */ | |
79d16385 DM |
1355 | static inline void netif_wake_queue(struct net_device *dev) |
1356 | { | |
e8a0464c | 1357 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
1358 | } |
1359 | ||
fd2ea0a7 DM |
1360 | static inline void netif_tx_wake_all_queues(struct net_device *dev) |
1361 | { | |
1362 | unsigned int i; | |
1363 | ||
1364 | for (i = 0; i < dev->num_tx_queues; i++) { | |
1365 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
1366 | netif_tx_wake_queue(txq); | |
1367 | } | |
1368 | } | |
1369 | ||
d29f749e DJ |
1370 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
1371 | { | |
1372 | set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | |
1373 | } | |
1374 | ||
bea3348e SH |
1375 | /** |
1376 | * netif_stop_queue - stop transmit | |
1377 | * @dev: network device | |
1378 | * | |
1379 | * Stop upper layers from calling the device hard_start_xmit routine. | |
1380 | * Used for flow control when transmit resources are unavailable. | |
1381 | */ | |
1da177e4 LT |
1382 | static inline void netif_stop_queue(struct net_device *dev) |
1383 | { | |
e8a0464c | 1384 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
1385 | } |
1386 | ||
fd2ea0a7 DM |
1387 | static inline void netif_tx_stop_all_queues(struct net_device *dev) |
1388 | { | |
1389 | unsigned int i; | |
1390 | ||
1391 | for (i = 0; i < dev->num_tx_queues; i++) { | |
1392 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
1393 | netif_tx_stop_queue(txq); | |
1394 | } | |
1395 | } | |
1396 | ||
d29f749e DJ |
1397 | static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
1398 | { | |
1399 | return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | |
1400 | } | |
1401 | ||
bea3348e SH |
1402 | /** |
1403 | * netif_queue_stopped - test if transmit queue is flow-blocked | |
1404 | * @dev: network device | |
1405 | * | |
1406 | * Test if transmit queue on device is currently unable to send. | |
1407 | */ | |
1da177e4 LT |
1408 | static inline int netif_queue_stopped(const struct net_device *dev) |
1409 | { | |
e8a0464c | 1410 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
1411 | } |
1412 | ||
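Taken together, netif_stop_queue(), netif_queue_stopped() and
netif_wake_queue() form the usual transmit flow-control handshake between a
driver and the stack. A minimal sketch, assuming a hypothetical driver "foo"
whose ring-accounting helpers (foo_tx_ring_full(), foo_tx_ring_has_room())
are inventions of this example:

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* ... post skb to the hardware TX ring ... */

            if (foo_tx_ring_full(priv))     /* assumed helper */
                    netif_stop_queue(dev);  /* stop upper layers calling us */
            return NETDEV_TX_OK;
    }

    static void foo_tx_reclaim(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* ... free completed TX descriptors ... */

            if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
                    netif_wake_queue(dev); /* resume transmission */
    }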
c3f26a26 DM |
1413 | static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue) |
1414 | { | |
1415 | return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state); | |
1416 | } | |
1417 | ||
bea3348e SH |
1418 | /** |
1419 | * netif_running - test if up | |
1420 | * @dev: network device | |
1421 | * | |
1422 | * Test if the device has been brought up. | |
1423 | */ | |
1da177e4 LT |
1424 | static inline int netif_running(const struct net_device *dev) |
1425 | { | |
1426 | return test_bit(__LINK_STATE_START, &dev->state); | |
1427 | } | |
1428 | ||
f25f4e44 PWJ |
1429 | /* |
1430 | * Routines to manage the subqueues on a device. We only need start, | |
1431 | * stop, and a check whether a given subqueue is stopped. All other | |
1432 | * device management is done at the overall netdevice level. | |
1433 | * There is also a test for whether the device is multiqueue. | |
1434 | */ | |
bea3348e SH |
1435 | |
1436 | /** | |
1437 | * netif_start_subqueue - allow sending packets on subqueue | |
1438 | * @dev: network device | |
1439 | * @queue_index: sub queue index | |
1440 | * | |
1441 | * Start individual transmit queue of a device with multiple transmit queues. | |
1442 | */ | |
f25f4e44 PWJ |
1443 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
1444 | { | |
fd2ea0a7 | 1445 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
1446 | |
1447 | netif_tx_start_queue(txq); | |
f25f4e44 PWJ |
1448 | } |
1449 | ||
bea3348e SH |
1450 | /** |
1451 | * netif_stop_subqueue - stop sending packets on subqueue | |
1452 | * @dev: network device | |
1453 | * @queue_index: sub queue index | |
1454 | * | |
1455 | * Stop individual transmit queue of a device with multiple transmit queues. | |
1456 | */ | |
f25f4e44 PWJ |
1457 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
1458 | { | |
fd2ea0a7 | 1459 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
f25f4e44 PWJ |
1460 | #ifdef CONFIG_NETPOLL_TRAP |
1461 | if (netpoll_trap()) | |
1462 | return; | |
1463 | #endif | |
7b3d3e4f | 1464 | netif_tx_stop_queue(txq); |
f25f4e44 PWJ |
1465 | } |
1466 | ||
bea3348e SH |
1467 | /** |
1468 | * __netif_subqueue_stopped - test status of subqueue | |
1469 | * @dev: network device | |
1470 | * @queue_index: sub queue index | |
1471 | * | |
1472 | * Check individual transmit queue of a device with multiple transmit queues. | |
1473 | */ | |
668f895a | 1474 | static inline int __netif_subqueue_stopped(const struct net_device *dev, |
f25f4e44 PWJ |
1475 | u16 queue_index) |
1476 | { | |
fd2ea0a7 | 1477 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
1478 | |
1479 | return netif_tx_queue_stopped(txq); | |
f25f4e44 PWJ |
1480 | } |
1481 | ||
668f895a PE |
1482 | static inline int netif_subqueue_stopped(const struct net_device *dev, |
1483 | struct sk_buff *skb) | |
1484 | { | |
1485 | return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | |
1486 | } | |
bea3348e SH |
1487 | |
1488 | /** | |
1489 | * netif_wake_subqueue - allow sending packets on subqueue | |
1490 | * @dev: network device | |
1491 | * @queue_index: sub queue index | |
1492 | * | |
1493 | * Resume individual transmit queue of a device with multiple transmit queues. | |
1494 | */ | |
f25f4e44 PWJ |
1495 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) |
1496 | { | |
fd2ea0a7 | 1497 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
f25f4e44 PWJ |
1498 | #ifdef CONFIG_NETPOLL_TRAP |
1499 | if (netpoll_trap()) | |
1500 | return; | |
1501 | #endif | |
fd2ea0a7 | 1502 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) |
37437bb2 | 1503 | __netif_schedule(txq->qdisc); |
f25f4e44 PWJ |
1504 | } |
1505 | ||
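On multiqueue hardware the same flow-control handshake is applied one ring at
a time; a sketch, again with hypothetical per-ring helpers:

    static netdev_tx_t foo_mq_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            u16 ring = skb_get_queue_mapping(skb); /* ring picked by the stack */

            /* ... post skb to hardware ring 'ring' ... */

            if (foo_ring_full(dev, ring))          /* assumed helper */
                    netif_stop_subqueue(dev, ring);
            return NETDEV_TX_OK;
    }

    /* and in the per-ring completion path: */
    if (__netif_subqueue_stopped(dev, ring) && foo_ring_has_room(dev, ring))
            netif_wake_subqueue(dev, ring);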
bea3348e SH |
1506 | /** |
1507 | * netif_is_multiqueue - test if device has multiple transmit queues | |
1508 | * @dev: network device | |
1509 | * | |
1510 | * Check if device has multiple transmit queues | |
bea3348e | 1511 | */ |
f25f4e44 PWJ |
1512 | static inline int netif_is_multiqueue(const struct net_device *dev) |
1513 | { | |
09e83b5d | 1514 | return (dev->num_tx_queues > 1); |
f25f4e44 | 1515 | } |
1da177e4 LT |
1516 | |
1517 | /* Use this variant when it is known for sure that it | |
0ef47309 ML |
1518 | * is executing from hardware interrupt context or with hardware interrupts |
1519 | * disabled. | |
1da177e4 | 1520 | */ |
bea3348e | 1521 | extern void dev_kfree_skb_irq(struct sk_buff *skb); |
1da177e4 LT |
1522 | |
1523 | /* Use this variant in places where it could be invoked | |
0ef47309 ML |
1524 | * from either hardware interrupt or other context, with hardware interrupts |
1525 | * either disabled or enabled. | |
1da177e4 | 1526 | */ |
56079431 | 1527 | extern void dev_kfree_skb_any(struct sk_buff *skb); |
1da177e4 LT |
1528 | |
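A sketch of the hard-IRQ case: a TX-completion interrupt handler frees
reclaimed skbs with dev_kfree_skb_irq(); a path that can run in either
context would use dev_kfree_skb_any() instead. foo_pop_completed() is an
assumed helper:

    static irqreturn_t foo_tx_irq(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct sk_buff *skb;

            while ((skb = foo_pop_completed(dev)) != NULL)
                    dev_kfree_skb_irq(skb); /* known hard-IRQ context */
            return IRQ_HANDLED;
    }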
1529 | #define HAVE_NETIF_RX 1 | |
1530 | extern int netif_rx(struct sk_buff *skb); | |
1531 | extern int netif_rx_ni(struct sk_buff *skb); | |
1532 | #define HAVE_NETIF_RECEIVE_SKB 1 | |
1533 | extern int netif_receive_skb(struct sk_buff *skb); | |
5b252f0c | 1534 | extern gro_result_t dev_gro_receive(struct napi_struct *napi, |
96e93eab | 1535 | struct sk_buff *skb); |
c7c4b3b6 BH |
1536 | extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); |
1537 | extern gro_result_t napi_gro_receive(struct napi_struct *napi, | |
d565b0a1 | 1538 | struct sk_buff *skb); |
96e93eab HX |
1539 | extern void napi_reuse_skb(struct napi_struct *napi, |
1540 | struct sk_buff *skb); | |
76620aaf | 1541 | extern struct sk_buff * napi_get_frags(struct napi_struct *napi); |
c7c4b3b6 | 1542 | extern gro_result_t napi_frags_finish(struct napi_struct *napi, |
5b252f0c BH |
1543 | struct sk_buff *skb, |
1544 | gro_result_t ret); | |
76620aaf | 1545 | extern struct sk_buff * napi_frags_skb(struct napi_struct *napi); |
c7c4b3b6 | 1546 | extern gro_result_t napi_gro_frags(struct napi_struct *napi); |
76620aaf HX |
1547 | |
1548 | static inline void napi_free_frags(struct napi_struct *napi) | |
1549 | { | |
1550 | kfree_skb(napi->skb); | |
1551 | napi->skb = NULL; | |
1552 | } | |
1553 | ||
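A sketch of how a NAPI driver typically feeds received packets through GRO
from its poll routine; struct foo_priv and foo_rx_next() are assumptions of
this example:

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
            struct sk_buff *skb;
            int work = 0;

            while (work < budget && (skb = foo_rx_next(priv)) != NULL) {
                    skb->protocol = eth_type_trans(skb, priv->dev);
                    napi_gro_receive(napi, skb); /* merge into a GRO flow */
                    work++;
            }
            if (work < budget)
                    napi_complete(napi); /* done; re-enable RX interrupts */
            return work;
    }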
bc1d0411 | 1554 | extern void netif_nit_deliver(struct sk_buff *skb); |
c2373ee9 | 1555 | extern int dev_valid_name(const char *name); |
881d966b EB |
1556 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); |
1557 | extern int dev_ethtool(struct net *net, struct ifreq *); | |
1da177e4 LT |
1558 | extern unsigned dev_get_flags(const struct net_device *); |
1559 | extern int dev_change_flags(struct net_device *, unsigned); | |
cf04a4c7 | 1560 | extern int dev_change_name(struct net_device *, const char *); |
0b815a1a | 1561 | extern int dev_set_alias(struct net_device *, const char *, size_t); |
ce286d32 EB |
1562 | extern int dev_change_net_namespace(struct net_device *, |
1563 | struct net *, const char *); | |
1da177e4 LT |
1564 | extern int dev_set_mtu(struct net_device *, int); |
1565 | extern int dev_set_mac_address(struct net_device *, | |
1566 | struct sockaddr *); | |
f6a78bfc | 1567 | extern int dev_hard_start_xmit(struct sk_buff *skb, |
fd2ea0a7 DM |
1568 | struct net_device *dev, |
1569 | struct netdev_queue *txq); | |
44540960 AB |
1570 | extern int dev_forward_skb(struct net_device *dev, |
1571 | struct sk_buff *skb); | |
1da177e4 | 1572 | |
20380731 | 1573 | extern int netdev_budget; |
1da177e4 LT |
1574 | |
1575 | /* Called by rtnetlink.c:rtnl_unlock() */ | |
1576 | extern void netdev_run_todo(void); | |
1577 | ||
bea3348e SH |
1578 | /** |
1579 | * dev_put - release reference to device | |
1580 | * @dev: network device | |
1581 | * | |
9ef4429b | 1582 | * Release reference to device to allow it to be freed. |
bea3348e | 1583 | */ |
1da177e4 LT |
1584 | static inline void dev_put(struct net_device *dev) |
1585 | { | |
1586 | atomic_dec(&dev->refcnt); | |
1587 | } | |
1588 | ||
bea3348e SH |
1589 | /** |
1590 | * dev_hold - get reference to device | |
1591 | * @dev: network device | |
1592 | * | |
9ef4429b | 1593 | * Hold reference to device to keep it from being freed. |
bea3348e | 1594 | */ |
15333061 SH |
1595 | static inline void dev_hold(struct net_device *dev) |
1596 | { | |
1597 | atomic_inc(&dev->refcnt); | |
1598 | } | |
1da177e4 LT |
1599 | |
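The hold/put pair brackets any deferred use of a device; a sketch assuming a
hypothetical work item that stashes a device pointer:

    dev_hold(dev);                   /* pin dev before deferring */
    priv->dev = dev;
    schedule_work(&priv->link_work);

    /* ... and at the end of the work handler: */
    dev_put(priv->dev);              /* allow dev to be freed again */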
1600 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | |
1601 | * and _off may be called from IRQ context, but it is the caller | |
1602 | * who is responsible for serializing these calls. | |
b00055aa SR |
1603 | * |
1604 | * The name carrier is inappropriate; these functions should really be | |
1605 | * called netif_lowerlayer_*() because they represent the state of any | |
1606 | * kind of lower layer, not just hardware media. | |
1da177e4 LT |
1607 | */ |
1608 | ||
1609 | extern void linkwatch_fire_event(struct net_device *dev); | |
e014debe | 1610 | extern void linkwatch_forget_dev(struct net_device *dev); |
1da177e4 | 1611 | |
bea3348e SH |
1612 | /** |
1613 | * netif_carrier_ok - test if carrier present | |
1614 | * @dev: network device | |
1615 | * | |
1616 | * Check if carrier is present on device | |
1617 | */ | |
1da177e4 LT |
1618 | static inline int netif_carrier_ok(const struct net_device *dev) |
1619 | { | |
1620 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | |
1621 | } | |
1622 | ||
9d21493b ED |
1623 | extern unsigned long dev_trans_start(struct net_device *dev); |
1624 | ||
1da177e4 LT |
1625 | extern void __netdev_watchdog_up(struct net_device *dev); |
1626 | ||
0a242efc | 1627 | extern void netif_carrier_on(struct net_device *dev); |
1da177e4 | 1628 | |
0a242efc | 1629 | extern void netif_carrier_off(struct net_device *dev); |
1da177e4 | 1630 | |
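A sketch of typical link reporting from a PHY poll or link-change interrupt;
foo_phy_link_up() is an assumed helper:

    static void foo_check_link(struct net_device *dev)
    {
            int up = foo_phy_link_up(dev);

            if (up && !netif_carrier_ok(dev))
                    netif_carrier_on(dev);  /* fires a linkwatch event */
            else if (!up && netif_carrier_ok(dev))
                    netif_carrier_off(dev);
    }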
bea3348e SH |
1631 | /** |
1632 | * netif_dormant_on - mark device as dormant. | |
1633 | * @dev: network device | |
1634 | * | |
1635 | * Mark device as dormant (as per RFC2863). | |
1636 | * | |
1637 | * The dormant state indicates that the relevant interface is not | |
1638 | * actually in a condition to pass packets (i.e., it is not 'up') but is | |
1639 | * in a "pending" state, waiting for some external event. For "on- | |
1640 | * demand" interfaces, this new state identifies the situation where the | |
1641 | * interface is waiting for events to place it in the up state. | |
1642 | * | |
1643 | */ | |
b00055aa SR |
1644 | static inline void netif_dormant_on(struct net_device *dev) |
1645 | { | |
1646 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | |
1647 | linkwatch_fire_event(dev); | |
1648 | } | |
1649 | ||
bea3348e SH |
1650 | /** |
1651 | * netif_dormant_off - set device as not dormant. | |
1652 | * @dev: network device | |
1653 | * | |
1654 | * Device is not in dormant state. | |
1655 | */ | |
b00055aa SR |
1656 | static inline void netif_dormant_off(struct net_device *dev) |
1657 | { | |
1658 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | |
1659 | linkwatch_fire_event(dev); | |
1660 | } | |
1661 | ||
bea3348e SH |
1662 | /** |
1663 | * netif_dormant - test if device is dormant | |
1664 | * @dev: network device | |
1665 | * | |
1666 | * Check if the device is dormant. | |
1667 | */ | |
b00055aa SR |
1668 | static inline int netif_dormant(const struct net_device *dev) |
1669 | { | |
1670 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | |
1671 | } | |
1672 | ||
1673 | ||
bea3348e SH |
1674 | /** |
1675 | * netif_oper_up - test if device is operational | |
1676 | * @dev: network device | |
1677 | * | |
1678 | * Check if the device's RFC2863 operational state is up. | |
1679 | */ | |
d94d9fee ED |
1680 | static inline int netif_oper_up(const struct net_device *dev) |
1681 | { | |
b00055aa SR |
1682 | return (dev->operstate == IF_OPER_UP || |
1683 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | |
1684 | } | |
1685 | ||
bea3348e SH |
1686 | /** |
1687 | * netif_device_present - is device available or removed | |
1688 | * @dev: network device | |
1689 | * | |
1690 | * Check if device has not been removed from system. | |
1691 | */ | |
1da177e4 LT |
1692 | static inline int netif_device_present(struct net_device *dev) |
1693 | { | |
1694 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | |
1695 | } | |
1696 | ||
56079431 | 1697 | extern void netif_device_detach(struct net_device *dev); |
1da177e4 | 1698 | |
56079431 | 1699 | extern void netif_device_attach(struct net_device *dev); |
1da177e4 LT |
1700 | |
1701 | /* | |
1702 | * Network interface message level settings | |
1703 | */ | |
1704 | #define HAVE_NETIF_MSG 1 | |
1705 | ||
1706 | enum { | |
1707 | NETIF_MSG_DRV = 0x0001, | |
1708 | NETIF_MSG_PROBE = 0x0002, | |
1709 | NETIF_MSG_LINK = 0x0004, | |
1710 | NETIF_MSG_TIMER = 0x0008, | |
1711 | NETIF_MSG_IFDOWN = 0x0010, | |
1712 | NETIF_MSG_IFUP = 0x0020, | |
1713 | NETIF_MSG_RX_ERR = 0x0040, | |
1714 | NETIF_MSG_TX_ERR = 0x0080, | |
1715 | NETIF_MSG_TX_QUEUED = 0x0100, | |
1716 | NETIF_MSG_INTR = 0x0200, | |
1717 | NETIF_MSG_TX_DONE = 0x0400, | |
1718 | NETIF_MSG_RX_STATUS = 0x0800, | |
1719 | NETIF_MSG_PKTDATA = 0x1000, | |
1720 | NETIF_MSG_HW = 0x2000, | |
1721 | NETIF_MSG_WOL = 0x4000, | |
1722 | }; | |
1723 | ||
1724 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | |
1725 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | |
1726 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | |
1727 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | |
1728 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | |
1729 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | |
1730 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | |
1731 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | |
1732 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | |
1733 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | |
1734 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | |
1735 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | |
1736 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | |
1737 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | |
1738 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | |
1739 | ||
1740 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |
1741 | { | |
1742 | /* use default */ | |
1743 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | |
1744 | return default_msg_enable_bits; | |
1745 | if (debug_value == 0) /* no output */ | |
1746 | return 0; | |
1747 | /* set low N bits */ | |
1748 | return (1 << debug_value) - 1; | |
1749 | } | |
1750 | ||
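The usual pattern is a module parameter fed through netif_msg_init() at probe
time; the names below are hypothetical:

    static int debug = -1;           /* -1 selects the driver defaults */
    module_param(debug, int, 0);

    /* in foo_probe(): */
    priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                             NETIF_MSG_LINK);

    /* later, gated log output: */
    if (netif_msg_link(priv))
            printk(KERN_INFO "%s: link is up\n", dev->name);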
c773e847 | 1751 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
932ff279 | 1752 | { |
c773e847 DM |
1753 | spin_lock(&txq->_xmit_lock); |
1754 | txq->xmit_lock_owner = cpu; | |
22dd7495 JHS |
1755 | } |
1756 | ||
fd2ea0a7 DM |
1757 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
1758 | { | |
1759 | spin_lock_bh(&txq->_xmit_lock); | |
1760 | txq->xmit_lock_owner = smp_processor_id(); | |
1761 | } | |
1762 | ||
c3f26a26 DM |
1763 | static inline int __netif_tx_trylock(struct netdev_queue *txq) |
1764 | { | |
1765 | int ok = spin_trylock(&txq->_xmit_lock); | |
1766 | if (likely(ok)) | |
1767 | txq->xmit_lock_owner = smp_processor_id(); | |
1768 | return ok; | |
1769 | } | |
1770 | ||
1771 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | |
1772 | { | |
1773 | txq->xmit_lock_owner = -1; | |
1774 | spin_unlock(&txq->_xmit_lock); | |
1775 | } | |
1776 | ||
1777 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | |
1778 | { | |
1779 | txq->xmit_lock_owner = -1; | |
1780 | spin_unlock_bh(&txq->_xmit_lock); | |
1781 | } | |
1782 | ||
08baf561 ED |
1783 | static inline void txq_trans_update(struct netdev_queue *txq) |
1784 | { | |
1785 | if (txq->xmit_lock_owner != -1) | |
1786 | txq->trans_start = jiffies; | |
1787 | } | |
1788 | ||
d29f749e DJ |
1789 | /** |
1790 | * netif_tx_lock - grab network device transmit lock | |
1791 | * @dev: network device | |
d29f749e DJ |
1792 | * |
1793 | * Get network device transmit lock | |
1794 | */ | |
22dd7495 JHS |
1795 | static inline void netif_tx_lock(struct net_device *dev) |
1796 | { | |
e8a0464c | 1797 | unsigned int i; |
c3f26a26 | 1798 | int cpu; |
c773e847 | 1799 | |
c3f26a26 DM |
1800 | spin_lock(&dev->tx_global_lock); |
1801 | cpu = smp_processor_id(); | |
e8a0464c DM |
1802 | for (i = 0; i < dev->num_tx_queues; i++) { |
1803 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
1804 | |
1805 | /* We are the only thread of execution doing a | |
1806 | * freeze, but we have to grab the _xmit_lock in | |
1807 | * order to synchronize with threads which are in | |
1808 | * the ->hard_start_xmit() handler and already | |
1809 | * checked the frozen bit. | |
1810 | */ | |
e8a0464c | 1811 | __netif_tx_lock(txq, cpu); |
c3f26a26 DM |
1812 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); |
1813 | __netif_tx_unlock(txq); | |
e8a0464c | 1814 | } |
932ff279 HX |
1815 | } |
1816 | ||
1817 | static inline void netif_tx_lock_bh(struct net_device *dev) | |
1818 | { | |
e8a0464c DM |
1819 | local_bh_disable(); |
1820 | netif_tx_lock(dev); | |
932ff279 HX |
1821 | } |
1822 | ||
932ff279 HX |
1823 | static inline void netif_tx_unlock(struct net_device *dev) |
1824 | { | |
e8a0464c DM |
1825 | unsigned int i; |
1826 | ||
1827 | for (i = 0; i < dev->num_tx_queues; i++) { | |
1828 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c773e847 | 1829 | |
c3f26a26 DM |
1830 | /* No need to grab the _xmit_lock here. If the |
1831 | * queue is not stopped for another reason, we | |
1832 | * force a schedule. | |
1833 | */ | |
1834 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | |
7b3d3e4f | 1835 | netif_schedule_queue(txq); |
c3f26a26 DM |
1836 | } |
1837 | spin_unlock(&dev->tx_global_lock); | |
932ff279 HX |
1838 | } |
1839 | ||
1840 | static inline void netif_tx_unlock_bh(struct net_device *dev) | |
1841 | { | |
e8a0464c DM |
1842 | netif_tx_unlock(dev); |
1843 | local_bh_enable(); | |
932ff279 HX |
1844 | } |
1845 | ||
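Because netif_tx_lock() freezes every transmit queue, it suits rare, global
reconfiguration rather than per-packet work; a minimal sketch:

    /* Quiesce all hard_start_xmit() callers while swapping TX state. */
    netif_tx_lock_bh(dev);
    /* ... update ring or feature state that hard_start_xmit() reads ... */
    netif_tx_unlock_bh(dev);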
c773e847 | 1846 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
22dd7495 | 1847 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 1848 | __netif_tx_lock(txq, cpu); \ |
22dd7495 JHS |
1849 | } \ |
1850 | } | |
1851 | ||
c773e847 | 1852 | #define HARD_TX_UNLOCK(dev, txq) { \ |
22dd7495 | 1853 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 1854 | __netif_tx_unlock(txq); \ |
22dd7495 JHS |
1855 | } \ |
1856 | } | |
1857 | ||
1da177e4 LT |
1858 | static inline void netif_tx_disable(struct net_device *dev) |
1859 | { | |
fd2ea0a7 | 1860 | unsigned int i; |
c3f26a26 | 1861 | int cpu; |
fd2ea0a7 | 1862 | |
c3f26a26 DM |
1863 | local_bh_disable(); |
1864 | cpu = smp_processor_id(); | |
fd2ea0a7 DM |
1865 | for (i = 0; i < dev->num_tx_queues; i++) { |
1866 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
1867 | |
1868 | __netif_tx_lock(txq, cpu); | |
fd2ea0a7 | 1869 | netif_tx_stop_queue(txq); |
c3f26a26 | 1870 | __netif_tx_unlock(txq); |
fd2ea0a7 | 1871 | } |
c3f26a26 | 1872 | local_bh_enable(); |
1da177e4 LT |
1873 | } |
1874 | ||
e308a5d8 DM |
1875 | static inline void netif_addr_lock(struct net_device *dev) |
1876 | { | |
1877 | spin_lock(&dev->addr_list_lock); | |
1878 | } | |
1879 | ||
1880 | static inline void netif_addr_lock_bh(struct net_device *dev) | |
1881 | { | |
1882 | spin_lock_bh(&dev->addr_list_lock); | |
1883 | } | |
1884 | ||
1885 | static inline void netif_addr_unlock(struct net_device *dev) | |
1886 | { | |
1887 | spin_unlock(&dev->addr_list_lock); | |
1888 | } | |
1889 | ||
1890 | static inline void netif_addr_unlock_bh(struct net_device *dev) | |
1891 | { | |
1892 | spin_unlock_bh(&dev->addr_list_lock); | |
1893 | } | |
1894 | ||
f001fde5 | 1895 | /* |
31278e71 | 1896 | * dev_addrs walker. Should be used only for read access. Call with |
f001fde5 JP |
1897 | * rcu_read_lock held. |
1898 | */ | |
1899 | #define for_each_dev_addr(dev, ha) \ | |
31278e71 | 1900 | list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) |
f001fde5 | 1901 | |
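A read-side walk under RCU; the entries are struct netdev_hw_addr, whose
->addr and ->type fields are printed here purely for illustration:

    struct netdev_hw_addr *ha;

    rcu_read_lock();
    for_each_dev_addr(dev, ha)
            printk(KERN_DEBUG "%s: addr %pM type %d\n",
                   dev->name, ha->addr, ha->type);
    rcu_read_unlock();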
1da177e4 LT |
1902 | /* These functions live elsewhere (drivers/net/net_init.c) but are related. */ | |
1903 | ||
1904 | extern void ether_setup(struct net_device *dev); | |
1905 | ||
1906 | /* Support for loadable net-drivers */ | |
f25f4e44 PWJ |
1907 | extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, |
1908 | void (*setup)(struct net_device *), | |
1909 | unsigned int queue_count); | |
1910 | #define alloc_netdev(sizeof_priv, name, setup) \ | |
1911 | alloc_netdev_mq(sizeof_priv, name, setup, 1) | |
1da177e4 LT |
1912 | extern int register_netdev(struct net_device *dev); |
1913 | extern void unregister_netdev(struct net_device *dev); | |
f001fde5 JP |
1914 | |
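A sketch of the allocate/register sequence for a four-queue Ethernet device;
struct foo_priv is an assumption of this example:

    struct net_device *dev;
    int err;

    dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d", ether_setup, 4);
    if (!dev)
            return -ENOMEM;

    err = register_netdev(dev);
    if (err) {
            free_netdev(dev);
            return err;
    }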
1915 | /* Functions used for device addresses handling */ | |
1916 | extern int dev_addr_add(struct net_device *dev, unsigned char *addr, | |
1917 | unsigned char addr_type); | |
1918 | extern int dev_addr_del(struct net_device *dev, unsigned char *addr, | |
1919 | unsigned char addr_type); | |
1920 | extern int dev_addr_add_multiple(struct net_device *to_dev, | |
1921 | struct net_device *from_dev, | |
1922 | unsigned char addr_type); | |
1923 | extern int dev_addr_del_multiple(struct net_device *to_dev, | |
1924 | struct net_device *from_dev, | |
1925 | unsigned char addr_type); | |
1926 | ||
4417da66 PM |
1927 | /* Functions used for secondary unicast and multicast support */ |
1928 | extern void dev_set_rx_mode(struct net_device *dev); | |
1929 | extern void __dev_set_rx_mode(struct net_device *dev); | |
ccffad25 JP |
1930 | extern int dev_unicast_delete(struct net_device *dev, void *addr); |
1931 | extern int dev_unicast_add(struct net_device *dev, void *addr); | |
e83a2ea8 CL |
1932 | extern int dev_unicast_sync(struct net_device *to, struct net_device *from); |
1933 | extern void dev_unicast_unsync(struct net_device *to, struct net_device *from); | |
1da177e4 LT |
1934 | extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); |
1935 | extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); | |
a0a400d7 PM |
1936 | extern int dev_mc_sync(struct net_device *to, struct net_device *from); |
1937 | extern void dev_mc_unsync(struct net_device *to, struct net_device *from); | |
61cbc2fc PM |
1938 | extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all); |
1939 | extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly); | |
e83a2ea8 CL |
1940 | extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); |
1941 | extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); | |
dad9b335 WC |
1942 | extern int dev_set_promiscuity(struct net_device *dev, int inc); |
1943 | extern int dev_set_allmulti(struct net_device *dev, int inc); | |
1da177e4 | 1944 | extern void netdev_state_change(struct net_device *dev); |
75c78500 MS |
1945 | extern void netdev_bonding_change(struct net_device *dev, |
1946 | unsigned long event); | |
d8a33ac4 | 1947 | extern void netdev_features_change(struct net_device *dev); |
1da177e4 | 1948 | /* Load a device via the kmod */ |
881d966b | 1949 | extern void dev_load(struct net *net, const char *name); |
1da177e4 | 1950 | extern void dev_mcast_init(void); |
eeda3fd6 | 1951 | extern const struct net_device_stats *dev_get_stats(struct net_device *dev); |
d83345ad | 1952 | extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats); |
eeda3fd6 | 1953 | |
1da177e4 LT |
1954 | extern int netdev_max_backlog; |
1955 | extern int weight_p; | |
1956 | extern int netdev_set_master(struct net_device *dev, struct net_device *master); | |
84fa7933 | 1957 | extern int skb_checksum_help(struct sk_buff *skb); |
576a30eb | 1958 | extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features); |
fb286bb2 HX |
1959 | #ifdef CONFIG_BUG |
1960 | extern void netdev_rx_csum_fault(struct net_device *dev); | |
1961 | #else | |
1962 | static inline void netdev_rx_csum_fault(struct net_device *dev) | |
1963 | { | |
1964 | } | |
1965 | #endif | |
1da177e4 LT |
1966 | /* rx skb timestamps */ |
1967 | extern void net_enable_timestamp(void); | |
1968 | extern void net_disable_timestamp(void); | |
1969 | ||
20380731 ACM |
1970 | #ifdef CONFIG_PROC_FS |
1971 | extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); | |
1972 | extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); | |
1973 | extern void dev_seq_stop(struct seq_file *seq, void *v); | |
1974 | #endif | |
1975 | ||
b8a9787e JV |
1976 | extern int netdev_class_create_file(struct class_attribute *class_attr); |
1977 | extern void netdev_class_remove_file(struct class_attribute *class_attr); | |
1978 | ||
cf04a4c7 | 1979 | extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); |
6579e57b | 1980 | |
20380731 ACM |
1981 | extern void linkwatch_run_queue(void); |
1982 | ||
b63365a2 HX |
1983 | unsigned long netdev_increment_features(unsigned long all, unsigned long one, |
1984 | unsigned long mask); | |
1985 | unsigned long netdev_fix_features(unsigned long features, const char *name); | |
7f353bf2 | 1986 | |
fc4a7489 PM |
1987 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
1988 | struct net_device *dev); | |
1989 | ||
bcd76111 | 1990 | static inline int net_gso_ok(int features, int gso_type) |
576a30eb | 1991 | { |
bcd76111 | 1992 | int feature = gso_type << NETIF_F_GSO_SHIFT; |
d6b4991a | 1993 | return (features & feature) == feature; |
576a30eb HX |
1994 | } |
1995 | ||
bcd76111 HX |
1996 | static inline int skb_gso_ok(struct sk_buff *skb, int features) |
1997 | { | |
278b2513 | 1998 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
a5bd8a13 | 1999 | (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST)); |
bcd76111 HX |
2000 | } |
2001 | ||
7967168c HX |
2002 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) |
2003 | { | |
a430a43d HX |
2004 | return skb_is_gso(skb) && |
2005 | (!skb_gso_ok(skb, dev->features) || | |
84fa7933 | 2006 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); |
7967168c HX |
2007 | } |
2008 | ||
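When netif_needs_gso() is true the stack must segment in software before
handing the skb to the device, roughly as dev_hard_start_xmit() does; a
condensed sketch (the drop label is assumed):

    if (netif_needs_gso(dev, skb)) {
            struct sk_buff *segs = skb_gso_segment(skb, dev->features);

            if (IS_ERR(segs))
                    goto drop;      /* segmentation failed */
            if (segs != NULL) {
                    /* ... transmit each segment in turn ... */
            }
    }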
82cc1a7a PWJ |
2009 | static inline void netif_set_gso_max_size(struct net_device *dev, |
2010 | unsigned int size) | |
2011 | { | |
2012 | dev->gso_max_size = size; | |
2013 | } | |
2014 | ||
5d4e039b JP |
2015 | static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, |
2016 | struct net_device *master) | |
2017 | { | |
2018 | if (skb->pkt_type == PACKET_HOST) { | |
2019 | u16 *dest = (u16 *) eth_hdr(skb)->h_dest; | |
2020 | ||
2021 | memcpy(dest, master->dev_addr, ETH_ALEN); | |
2022 | } | |
2023 | } | |
2024 | ||
7ea49ed7 | 2025 | /* On bonding slaves other than the currently active slave, suppress |
f5b2b966 JV |
2026 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and |
2027 | * ARP on active-backup slaves with arp_validate enabled. | |
7ea49ed7 DM |
2028 | */ |
2029 | static inline int skb_bond_should_drop(struct sk_buff *skb) | |
2030 | { | |
2031 | struct net_device *dev = skb->dev; | |
2032 | struct net_device *master = dev->master; | |
2033 | ||
6cf3f41e JV |
2034 | if (master) { |
2035 | if (master->priv_flags & IFF_MASTER_ARPMON) | |
2036 | dev->last_rx = jiffies; | |
2037 | ||
5d4e039b JP |
2038 | if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) { |
2039 | /* Unmangle the address: the local destination address | |
2040 | * will always be the one the master has. This provides the | |
2041 | * right functionality when the master is attached to a bridge. | |
2042 | */ | |
2043 | skb_bond_set_mac_by_master(skb, master); | |
2044 | } | |
2045 | ||
6cf3f41e JV |
2046 | if (dev->priv_flags & IFF_SLAVE_INACTIVE) { |
2047 | if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && | |
f3a7c66b | 2048 | skb->protocol == __cpu_to_be16(ETH_P_ARP)) |
7ea49ed7 | 2049 | return 0; |
7ea49ed7 | 2050 | |
6cf3f41e JV |
2051 | if (master->priv_flags & IFF_MASTER_ALB) { |
2052 | if (skb->pkt_type != PACKET_BROADCAST && | |
2053 | skb->pkt_type != PACKET_MULTICAST) | |
2054 | return 0; | |
2055 | } | |
2056 | if (master->priv_flags & IFF_MASTER_8023AD && | |
f3a7c66b | 2057 | skb->protocol == __cpu_to_be16(ETH_P_SLOW)) |
6cf3f41e JV |
2058 | return 0; |
2059 | ||
2060 | return 1; | |
2061 | } | |
7ea49ed7 DM |
2062 | } |
2063 | return 0; | |
2064 | } | |
2065 | ||
505d4f73 | 2066 | extern struct pernet_operations __net_initdata loopback_net_ops; |
b1b67dd4 PM |
2067 | |
2068 | static inline int dev_ethtool_get_settings(struct net_device *dev, | |
2069 | struct ethtool_cmd *cmd) | |
2070 | { | |
2071 | if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings) | |
2072 | return -EOPNOTSUPP; | |
2073 | return dev->ethtool_ops->get_settings(dev, cmd); | |
2074 | } | |
2075 | ||
2076 | static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev) | |
2077 | { | |
2078 | if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum) | |
2079 | return 0; | |
2080 | return dev->ethtool_ops->get_rx_csum(dev); | |
2081 | } | |
2082 | ||
2083 | static inline u32 dev_ethtool_get_flags(struct net_device *dev) | |
2084 | { | |
2085 | if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags) | |
2086 | return 0; | |
2087 | return dev->ethtool_ops->get_flags(dev); | |
2088 | } | |
1da177e4 LT |
2089 | #endif /* __KERNEL__ */ |
2090 | ||
385a154c | 2091 | #endif /* _LINUX_NETDEVICE_H */ |