]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * Definitions for the Interfaces handler. | |
7 | * | |
8 | * Version: @(#)dev.h 1.0.10 08/12/93 | |
9 | * | |
02c30a84 | 10 | * Authors: Ross Biro |
1da177e4 LT |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> | |
13 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> | |
113aa838 | 14 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> |
1da177e4 LT |
15 | * Bjorn Ekwall. <bj0rn@blox.se> |
16 | * Pekka Riikonen <priikone@poseidon.pspt.fi> | |
17 | * | |
18 | * This program is free software; you can redistribute it and/or | |
19 | * modify it under the terms of the GNU General Public License | |
20 | * as published by the Free Software Foundation; either version | |
21 | * 2 of the License, or (at your option) any later version. | |
22 | * | |
23 | * Moved to /usr/include/linux for NET3 | |
24 | */ | |
25 | #ifndef _LINUX_NETDEVICE_H | |
26 | #define _LINUX_NETDEVICE_H | |
27 | ||
28 | #include <linux/if.h> | |
29 | #include <linux/if_ether.h> | |
30 | #include <linux/if_packet.h> | |
95c26df8 | 31 | #include <linux/if_link.h> |
1da177e4 LT |
32 | |
33 | #ifdef __KERNEL__ | |
ed77134b | 34 | #include <linux/pm_qos_params.h> |
d7fe0f24 | 35 | #include <linux/timer.h> |
bea3348e | 36 | #include <linux/delay.h> |
1da177e4 LT |
37 | #include <asm/atomic.h> |
38 | #include <asm/cache.h> | |
39 | #include <asm/byteorder.h> | |
40 | ||
1da177e4 LT |
41 | #include <linux/device.h> |
42 | #include <linux/percpu.h> | |
4d5b78c0 | 43 | #include <linux/rculist.h> |
db217334 | 44 | #include <linux/dmaengine.h> |
bea3348e | 45 | #include <linux/workqueue.h> |
1da177e4 | 46 | |
b1b67dd4 | 47 | #include <linux/ethtool.h> |
a050c33f | 48 | #include <net/net_namespace.h> |
cf85d08f | 49 | #include <net/dsa.h> |
7a6b6f51 | 50 | #ifdef CONFIG_DCB |
2f90b865 AD |
51 | #include <net/dcbnl.h> |
52 | #endif | |
a050c33f | 53 | |
1da177e4 | 54 | struct vlan_group; |
115c1d6e | 55 | struct netpoll_info; |
c1f19b51 | 56 | struct phy_device; |
704232c2 JB |
57 | /* 802.11 specific */ |
58 | struct wireless_dev; | |
1da177e4 LT |
59 | /* source back-compat hooks */ |
60 | #define SET_ETHTOOL_OPS(netdev,ops) \ | |
61 | ( (netdev)->ethtool_ops = (ops) ) | |
62 | ||
c1f79426 SA |
63 | /* hardware address assignment types */ |
64 | #define NET_ADDR_PERM 0 /* address is permanent (default) */ | |
65 | #define NET_ADDR_RANDOM 1 /* address is generated randomly */ | |
66 | #define NET_ADDR_STOLEN 2 /* address is stolen from other device */ | |
67 | ||
9a1654ba JP |
68 | /* Backlog congestion levels */ |
69 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | |
70 | #define NET_RX_DROP 1 /* packet dropped */ | |
71 | ||
572a9d7b PM |
72 | /* |
73 | * Transmit return codes: transmit return codes originate from three different | |
74 | * namespaces: | |
75 | * | |
76 | * - qdisc return codes | |
77 | * - driver transmit return codes | |
78 | * - errno values | |
79 | * | |
80 | * Drivers are allowed to return any one of those in their hard_start_xmit() | |
81 | * function. Real network devices commonly used with qdiscs should only return | |
82 | * the driver transmit return codes though - when qdiscs are used, the actual | |
83 | * transmission happens asynchronously, so the value is not propagated to | |
84 | * higher layers. Virtual network devices transmit synchronously, in this case | |
85 | * the driver transmit return codes are consumed by dev_queue_xmit(), all | |
86 | * others are propagated to higher layers. | |
87 | */ | |
88 | ||
89 | /* qdisc ->enqueue() return codes. */ | |
90 | #define NET_XMIT_SUCCESS 0x00 | |
9a1654ba JP |
91 | #define NET_XMIT_DROP 0x01 /* skb dropped */ |
92 | #define NET_XMIT_CN 0x02 /* congestion notification */ | |
93 | #define NET_XMIT_POLICED 0x03 /* skb is shot by police */ | |
94 | #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ | |
1da177e4 | 95 | |
b9df3cb8 GR |
96 | /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It |
97 | * indicates that the device will soon be dropping packets, or already drops | |
98 | * some packets of the same priority; prompting us to send less aggressively. */ | |
572a9d7b | 99 | #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) |
1da177e4 LT |
100 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) |
101 | ||
dc1f8bf6 | 102 | /* Driver transmit return codes */ |
9a1654ba | 103 | #define NETDEV_TX_MASK 0xf0 |
572a9d7b | 104 | |
dc1f8bf6 | 105 | enum netdev_tx { |
572a9d7b | 106 | __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ |
9a1654ba JP |
107 | NETDEV_TX_OK = 0x00, /* driver took care of packet */ |
108 | NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ | |
109 | NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ | |
dc1f8bf6 SH |
110 | }; |
111 | typedef enum netdev_tx netdev_tx_t; | |
112 | ||
9a1654ba JP |
113 | /* |
114 | * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; | |
115 | * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. | |
116 | */ | |
117 | static inline bool dev_xmit_complete(int rc) | |
118 | { | |
119 | /* | |
120 | * Positive cases with an skb consumed by a driver: | |
121 | * - successful transmission (rc == NETDEV_TX_OK) | |
122 | * - error while transmitting (rc < 0) | |
123 | * - error while queueing to a different device (rc & NET_XMIT_MASK) | |
124 | */ | |
125 | if (likely(rc < NET_XMIT_MASK)) | |
126 | return true; | |
127 | ||
128 | return false; | |
129 | } | |
130 | ||
1da177e4 LT |
131 | #endif |
132 | ||
133 | #define MAX_ADDR_LEN 32 /* Largest hardware address length */ | |
134 | ||
23b41168 VD |
135 | /* Initial net device group. All devices belong to group 0 by default. */ |
136 | #define INIT_NETDEV_GROUP 0 | |
137 | ||
c88e6f51 | 138 | #ifdef __KERNEL__ |
1da177e4 LT |
139 | /* |
140 | * Compute the worst case header length according to the protocols | |
141 | * used. | |
142 | */ | |
fe2918b0 | 143 | |
caf66e58 | 144 | #if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) |
8388e3da DM |
145 | # if defined(CONFIG_MAC80211_MESH) |
146 | # define LL_MAX_HEADER 128 | |
147 | # else | |
148 | # define LL_MAX_HEADER 96 | |
149 | # endif | |
c759a6b4 | 150 | #elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) |
8388e3da | 151 | # define LL_MAX_HEADER 48 |
1da177e4 | 152 | #else |
8388e3da | 153 | # define LL_MAX_HEADER 32 |
1da177e4 LT |
154 | #endif |
155 | ||
e81c7359 DM |
156 | #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ |
157 | !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \ | |
158 | !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \ | |
159 | !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE) | |
1da177e4 LT |
160 | #define MAX_HEADER LL_MAX_HEADER |
161 | #else | |
162 | #define MAX_HEADER (LL_MAX_HEADER + 48) | |
163 | #endif | |
164 | ||
165 | /* | |
be1f3c2c BH |
166 | * Old network device statistics. Fields are native words |
167 | * (unsigned long) so they can be read and written atomically. | |
1da177e4 | 168 | */ |
fe2918b0 | 169 | |
d94d9fee | 170 | struct net_device_stats { |
3cfde79c BH |
171 | unsigned long rx_packets; |
172 | unsigned long tx_packets; | |
173 | unsigned long rx_bytes; | |
174 | unsigned long tx_bytes; | |
175 | unsigned long rx_errors; | |
176 | unsigned long tx_errors; | |
177 | unsigned long rx_dropped; | |
178 | unsigned long tx_dropped; | |
179 | unsigned long multicast; | |
1da177e4 | 180 | unsigned long collisions; |
1da177e4 | 181 | unsigned long rx_length_errors; |
3cfde79c BH |
182 | unsigned long rx_over_errors; |
183 | unsigned long rx_crc_errors; | |
184 | unsigned long rx_frame_errors; | |
185 | unsigned long rx_fifo_errors; | |
186 | unsigned long rx_missed_errors; | |
1da177e4 LT |
187 | unsigned long tx_aborted_errors; |
188 | unsigned long tx_carrier_errors; | |
189 | unsigned long tx_fifo_errors; | |
190 | unsigned long tx_heartbeat_errors; | |
191 | unsigned long tx_window_errors; | |
1da177e4 LT |
192 | unsigned long rx_compressed; |
193 | unsigned long tx_compressed; | |
194 | }; | |
195 | ||
be1f3c2c BH |
196 | #endif /* __KERNEL__ */ |
197 | ||
1da177e4 LT |
198 | |
199 | /* Media selection options. */ | |
200 | enum { | |
201 | IF_PORT_UNKNOWN = 0, | |
202 | IF_PORT_10BASE2, | |
203 | IF_PORT_10BASET, | |
204 | IF_PORT_AUI, | |
205 | IF_PORT_100BASET, | |
206 | IF_PORT_100BASETX, | |
207 | IF_PORT_100BASEFX | |
208 | }; | |
209 | ||
210 | #ifdef __KERNEL__ | |
211 | ||
212 | #include <linux/cache.h> | |
213 | #include <linux/skbuff.h> | |
214 | ||
215 | struct neighbour; | |
216 | struct neigh_parms; | |
217 | struct sk_buff; | |
218 | ||
f001fde5 JP |
219 | struct netdev_hw_addr { |
220 | struct list_head list; | |
221 | unsigned char addr[MAX_ADDR_LEN]; | |
222 | unsigned char type; | |
ccffad25 JP |
223 | #define NETDEV_HW_ADDR_T_LAN 1 |
224 | #define NETDEV_HW_ADDR_T_SAN 2 | |
225 | #define NETDEV_HW_ADDR_T_SLAVE 3 | |
226 | #define NETDEV_HW_ADDR_T_UNICAST 4 | |
22bedad3 | 227 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
ccffad25 | 228 | bool synced; |
22bedad3 | 229 | bool global_use; |
8f8f103d | 230 | int refcount; |
f001fde5 JP |
231 | struct rcu_head rcu_head; |
232 | }; | |
233 | ||
31278e71 JP |
234 | struct netdev_hw_addr_list { |
235 | struct list_head list; | |
236 | int count; | |
237 | }; | |
238 | ||
22bedad3 JP |
239 | #define netdev_hw_addr_list_count(l) ((l)->count) |
240 | #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) | |
241 | #define netdev_hw_addr_list_for_each(ha, l) \ | |
242 | list_for_each_entry(ha, &(l)->list, list) | |
32e7bfc4 | 243 | |
22bedad3 JP |
244 | #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) |
245 | #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) | |
246 | #define netdev_for_each_uc_addr(ha, dev) \ | |
247 | netdev_hw_addr_list_for_each(ha, &(dev)->uc) | |
6683ece3 | 248 | |
22bedad3 JP |
249 | #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) |
250 | #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) | |
18e225f2 | 251 | #define netdev_for_each_mc_addr(ha, dev) \ |
22bedad3 | 252 | netdev_hw_addr_list_for_each(ha, &(dev)->mc) |
6683ece3 | 253 | |
d94d9fee | 254 | struct hh_cache { |
1da177e4 LT |
255 | struct hh_cache *hh_next; /* Next entry */ |
256 | atomic_t hh_refcnt; /* number of users */ | |
f0490980 ED |
257 | /* |
258 | * We want hh_output, hh_len, hh_lock and hh_data be a in a separate | |
259 | * cache line on SMP. | |
260 | * They are mostly read, but hh_refcnt may be changed quite frequently, | |
261 | * incurring cache line ping pongs. | |
262 | */ | |
263 | __be16 hh_type ____cacheline_aligned_in_smp; | |
264 | /* protocol identifier, f.e ETH_P_IP | |
1da177e4 LT |
265 | * NOTE: For VLANs, this will be the |
266 | * encapuslated type. --BLG | |
267 | */ | |
d5c42c0e | 268 | u16 hh_len; /* length of header */ |
1da177e4 | 269 | int (*hh_output)(struct sk_buff *skb); |
3644f0ce | 270 | seqlock_t hh_lock; |
1da177e4 LT |
271 | |
272 | /* cached hardware header; allow for machine alignment needs. */ | |
273 | #define HH_DATA_MOD 16 | |
274 | #define HH_DATA_OFF(__len) \ | |
5ba0eac6 | 275 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) |
1da177e4 LT |
276 | #define HH_DATA_ALIGN(__len) \ |
277 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) | |
278 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | |
279 | }; | |
280 | ||
34d101dd ED |
281 | static inline void hh_cache_put(struct hh_cache *hh) |
282 | { | |
283 | if (atomic_dec_and_test(&hh->hh_refcnt)) | |
284 | kfree(hh); | |
285 | } | |
286 | ||
1da177e4 LT |
287 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. |
288 | * Alternative is: | |
289 | * dev->hard_header_len ? (dev->hard_header_len + | |
290 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | |
291 | * | |
292 | * We could use other alignment values, but we must maintain the | |
293 | * relationship HH alignment <= LL alignment. | |
f5184d26 JB |
294 | * |
295 | * LL_ALLOCATED_SPACE also takes into account the tailroom the device | |
296 | * may need. | |
1da177e4 LT |
297 | */ |
298 | #define LL_RESERVED_SPACE(dev) \ | |
f5184d26 | 299 | ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 300 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
f5184d26 JB |
301 | ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
302 | #define LL_ALLOCATED_SPACE(dev) \ | |
303 | ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | |
1da177e4 | 304 | |
3b04ddde SH |
305 | struct header_ops { |
306 | int (*create) (struct sk_buff *skb, struct net_device *dev, | |
307 | unsigned short type, const void *daddr, | |
308 | const void *saddr, unsigned len); | |
309 | int (*parse)(const struct sk_buff *skb, unsigned char *haddr); | |
310 | int (*rebuild)(struct sk_buff *skb); | |
e69dd336 | 311 | int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); |
3b04ddde SH |
312 | void (*cache_update)(struct hh_cache *hh, |
313 | const struct net_device *dev, | |
314 | const unsigned char *haddr); | |
315 | }; | |
316 | ||
1da177e4 LT |
317 | /* These flag bits are private to the generic network queueing |
318 | * layer, they may not be explicitly referenced by any other | |
319 | * code. | |
320 | */ | |
321 | ||
d94d9fee | 322 | enum netdev_state_t { |
1da177e4 LT |
323 | __LINK_STATE_START, |
324 | __LINK_STATE_PRESENT, | |
1da177e4 | 325 | __LINK_STATE_NOCARRIER, |
b00055aa SR |
326 | __LINK_STATE_LINKWATCH_PENDING, |
327 | __LINK_STATE_DORMANT, | |
1da177e4 LT |
328 | }; |
329 | ||
330 | ||
331 | /* | |
332 | * This structure holds at boot time configured netdevice settings. They | |
fe2918b0 | 333 | * are then used in the device probing. |
1da177e4 LT |
334 | */ |
335 | struct netdev_boot_setup { | |
336 | char name[IFNAMSIZ]; | |
337 | struct ifmap map; | |
338 | }; | |
339 | #define NETDEV_BOOT_SETUP_MAX 8 | |
340 | ||
20380731 | 341 | extern int __init netdev_boot_setup(char *str); |
1da177e4 | 342 | |
bea3348e SH |
343 | /* |
344 | * Structure for NAPI scheduling similar to tasklet but with weighting | |
345 | */ | |
346 | struct napi_struct { | |
347 | /* The poll_list must only be managed by the entity which | |
348 | * changes the state of the NAPI_STATE_SCHED bit. This means | |
349 | * whoever atomically sets that bit can add this napi_struct | |
350 | * to the per-cpu poll_list, and whoever clears that bit | |
351 | * can remove from the list right before clearing the bit. | |
352 | */ | |
353 | struct list_head poll_list; | |
354 | ||
355 | unsigned long state; | |
356 | int weight; | |
357 | int (*poll)(struct napi_struct *, int); | |
358 | #ifdef CONFIG_NETPOLL | |
359 | spinlock_t poll_lock; | |
360 | int poll_owner; | |
bea3348e | 361 | #endif |
4ae5544f HX |
362 | |
363 | unsigned int gro_count; | |
364 | ||
5d38a079 | 365 | struct net_device *dev; |
d565b0a1 HX |
366 | struct list_head dev_list; |
367 | struct sk_buff *gro_list; | |
5d38a079 | 368 | struct sk_buff *skb; |
bea3348e SH |
369 | }; |
370 | ||
d94d9fee | 371 | enum { |
bea3348e | 372 | NAPI_STATE_SCHED, /* Poll is scheduled */ |
a0a46196 | 373 | NAPI_STATE_DISABLE, /* Disable pending */ |
7b363e44 | 374 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ |
bea3348e SH |
375 | }; |
376 | ||
5b252f0c | 377 | enum gro_result { |
d1c76af9 HX |
378 | GRO_MERGED, |
379 | GRO_MERGED_FREE, | |
380 | GRO_HELD, | |
381 | GRO_NORMAL, | |
382 | GRO_DROP, | |
383 | }; | |
5b252f0c | 384 | typedef enum gro_result gro_result_t; |
d1c76af9 | 385 | |
8a4eb573 JP |
386 | /* |
387 | * enum rx_handler_result - Possible return values for rx_handlers. | |
388 | * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it | |
389 | * further. | |
390 | * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in | |
391 | * case skb->dev was changed by rx_handler. | |
392 | * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. | |
393 | * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called. | |
394 | * | |
395 | * rx_handlers are functions called from inside __netif_receive_skb(), to do | |
396 | * special processing of the skb, prior to delivery to protocol handlers. | |
397 | * | |
398 | * Currently, a net_device can only have a single rx_handler registered. Trying | |
399 | * to register a second rx_handler will return -EBUSY. | |
400 | * | |
401 | * To register a rx_handler on a net_device, use netdev_rx_handler_register(). | |
402 | * To unregister a rx_handler on a net_device, use | |
403 | * netdev_rx_handler_unregister(). | |
404 | * | |
405 | * Upon return, rx_handler is expected to tell __netif_receive_skb() what to | |
406 | * do with the skb. | |
407 | * | |
408 | * If the rx_handler consumed to skb in some way, it should return | |
409 | * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for | |
410 | * the skb to be delivered in some other ways. | |
411 | * | |
412 | * If the rx_handler changed skb->dev, to divert the skb to another | |
413 | * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the | |
414 | * new device will be called if it exists. | |
415 | * | |
416 | * If the rx_handler consider the skb should be ignored, it should return | |
417 | * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that | |
418 | * are registred on exact device (ptype->dev == skb->dev). | |
419 | * | |
420 | * If the rx_handler didn't changed skb->dev, but want the skb to be normally | |
421 | * delivered, it should return RX_HANDLER_PASS. | |
422 | * | |
423 | * A device without a registered rx_handler will behave as if rx_handler | |
424 | * returned RX_HANDLER_PASS. | |
425 | */ | |
426 | ||
427 | enum rx_handler_result { | |
428 | RX_HANDLER_CONSUMED, | |
429 | RX_HANDLER_ANOTHER, | |
430 | RX_HANDLER_EXACT, | |
431 | RX_HANDLER_PASS, | |
432 | }; | |
433 | typedef enum rx_handler_result rx_handler_result_t; | |
434 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); | |
ab95bfe0 | 435 | |
b3c97528 | 436 | extern void __napi_schedule(struct napi_struct *n); |
bea3348e | 437 | |
a0a46196 DM |
438 | static inline int napi_disable_pending(struct napi_struct *n) |
439 | { | |
440 | return test_bit(NAPI_STATE_DISABLE, &n->state); | |
441 | } | |
442 | ||
bea3348e SH |
443 | /** |
444 | * napi_schedule_prep - check if napi can be scheduled | |
445 | * @n: napi context | |
446 | * | |
447 | * Test if NAPI routine is already running, and if not mark | |
448 | * it as running. This is used as a condition variable | |
a0a46196 DM |
449 | * insure only one NAPI poll instance runs. We also make |
450 | * sure there is no pending NAPI disable. | |
bea3348e SH |
451 | */ |
452 | static inline int napi_schedule_prep(struct napi_struct *n) | |
453 | { | |
a0a46196 DM |
454 | return !napi_disable_pending(n) && |
455 | !test_and_set_bit(NAPI_STATE_SCHED, &n->state); | |
bea3348e SH |
456 | } |
457 | ||
458 | /** | |
459 | * napi_schedule - schedule NAPI poll | |
460 | * @n: napi context | |
461 | * | |
462 | * Schedule NAPI poll routine to be called if it is not already | |
463 | * running. | |
464 | */ | |
465 | static inline void napi_schedule(struct napi_struct *n) | |
466 | { | |
467 | if (napi_schedule_prep(n)) | |
468 | __napi_schedule(n); | |
469 | } | |
470 | ||
bfe13f54 RD |
471 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
472 | static inline int napi_reschedule(struct napi_struct *napi) | |
473 | { | |
474 | if (napi_schedule_prep(napi)) { | |
475 | __napi_schedule(napi); | |
476 | return 1; | |
477 | } | |
478 | return 0; | |
479 | } | |
480 | ||
bea3348e SH |
481 | /** |
482 | * napi_complete - NAPI processing complete | |
483 | * @n: napi context | |
484 | * | |
485 | * Mark NAPI processing as complete. | |
486 | */ | |
d565b0a1 HX |
487 | extern void __napi_complete(struct napi_struct *n); |
488 | extern void napi_complete(struct napi_struct *n); | |
bea3348e SH |
489 | |
490 | /** | |
491 | * napi_disable - prevent NAPI from scheduling | |
492 | * @n: napi context | |
493 | * | |
494 | * Stop NAPI from being scheduled on this context. | |
495 | * Waits till any outstanding processing completes. | |
496 | */ | |
497 | static inline void napi_disable(struct napi_struct *n) | |
498 | { | |
a0a46196 | 499 | set_bit(NAPI_STATE_DISABLE, &n->state); |
bea3348e | 500 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) |
43cc7380 | 501 | msleep(1); |
a0a46196 | 502 | clear_bit(NAPI_STATE_DISABLE, &n->state); |
bea3348e SH |
503 | } |
504 | ||
505 | /** | |
506 | * napi_enable - enable NAPI scheduling | |
507 | * @n: napi context | |
508 | * | |
509 | * Resume NAPI from being scheduled on this context. | |
510 | * Must be paired with napi_disable. | |
511 | */ | |
512 | static inline void napi_enable(struct napi_struct *n) | |
513 | { | |
514 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | |
515 | smp_mb__before_clear_bit(); | |
516 | clear_bit(NAPI_STATE_SCHED, &n->state); | |
517 | } | |
518 | ||
c264c3de SH |
519 | #ifdef CONFIG_SMP |
520 | /** | |
521 | * napi_synchronize - wait until NAPI is not running | |
522 | * @n: napi context | |
523 | * | |
524 | * Wait until NAPI is done being scheduled on this context. | |
525 | * Waits till any outstanding processing completes but | |
526 | * does not disable future activations. | |
527 | */ | |
528 | static inline void napi_synchronize(const struct napi_struct *n) | |
529 | { | |
530 | while (test_bit(NAPI_STATE_SCHED, &n->state)) | |
531 | msleep(1); | |
532 | } | |
533 | #else | |
534 | # define napi_synchronize(n) barrier() | |
535 | #endif | |
536 | ||
d94d9fee | 537 | enum netdev_queue_state_t { |
79d16385 | 538 | __QUEUE_STATE_XOFF, |
c3f26a26 | 539 | __QUEUE_STATE_FROZEN, |
5a0d2268 ED |
540 | #define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \ |
541 | (1 << __QUEUE_STATE_FROZEN)) | |
79d16385 DM |
542 | }; |
543 | ||
bb949fbd | 544 | struct netdev_queue { |
6a321cb3 ED |
545 | /* |
546 | * read mostly part | |
547 | */ | |
bb949fbd | 548 | struct net_device *dev; |
b0e1e646 | 549 | struct Qdisc *qdisc; |
79d16385 | 550 | unsigned long state; |
b0e1e646 | 551 | struct Qdisc *qdisc_sleeping; |
1d24eb48 TH |
552 | #ifdef CONFIG_RPS |
553 | struct kobject kobj; | |
554 | #endif | |
f2cd2d3e ED |
555 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
556 | int numa_node; | |
557 | #endif | |
6a321cb3 ED |
558 | /* |
559 | * write mostly part | |
560 | */ | |
561 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; | |
562 | int xmit_lock_owner; | |
9d21493b ED |
563 | /* |
564 | * please use this field instead of dev->trans_start | |
565 | */ | |
566 | unsigned long trans_start; | |
e8a0464c | 567 | } ____cacheline_aligned_in_smp; |
bb949fbd | 568 | |
f2cd2d3e ED |
569 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
570 | { | |
571 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
572 | return q->numa_node; | |
573 | #else | |
b236da69 | 574 | return NUMA_NO_NODE; |
f2cd2d3e ED |
575 | #endif |
576 | } | |
577 | ||
578 | static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) | |
579 | { | |
580 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
581 | q->numa_node = node; | |
582 | #endif | |
583 | } | |
584 | ||
df334545 | 585 | #ifdef CONFIG_RPS |
0a9627f2 TH |
586 | /* |
587 | * This structure holds an RPS map which can be of variable length. The | |
588 | * map is an array of CPUs. | |
589 | */ | |
590 | struct rps_map { | |
591 | unsigned int len; | |
592 | struct rcu_head rcu; | |
593 | u16 cpus[0]; | |
594 | }; | |
595 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16))) | |
596 | ||
fec5e652 | 597 | /* |
c445477d BH |
598 | * The rps_dev_flow structure contains the mapping of a flow to a CPU, the |
599 | * tail pointer for that CPU's input queue at the time of last enqueue, and | |
600 | * a hardware filter index. | |
fec5e652 TH |
601 | */ |
602 | struct rps_dev_flow { | |
603 | u16 cpu; | |
c445477d | 604 | u16 filter; |
fec5e652 TH |
605 | unsigned int last_qtail; |
606 | }; | |
c445477d | 607 | #define RPS_NO_FILTER 0xffff |
fec5e652 TH |
608 | |
609 | /* | |
610 | * The rps_dev_flow_table structure contains a table of flow mappings. | |
611 | */ | |
612 | struct rps_dev_flow_table { | |
613 | unsigned int mask; | |
614 | struct rcu_head rcu; | |
615 | struct work_struct free_work; | |
616 | struct rps_dev_flow flows[0]; | |
617 | }; | |
618 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ | |
619 | (_num * sizeof(struct rps_dev_flow))) | |
620 | ||
621 | /* | |
622 | * The rps_sock_flow_table contains mappings of flows to the last CPU | |
623 | * on which they were processed by the application (set in recvmsg). | |
624 | */ | |
625 | struct rps_sock_flow_table { | |
626 | unsigned int mask; | |
627 | u16 ents[0]; | |
628 | }; | |
629 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ | |
630 | (_num * sizeof(u16))) | |
631 | ||
632 | #define RPS_NO_CPU 0xffff | |
633 | ||
634 | static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, | |
635 | u32 hash) | |
636 | { | |
637 | if (table && hash) { | |
638 | unsigned int cpu, index = hash & table->mask; | |
639 | ||
640 | /* We only give a hint, preemption can change cpu under us */ | |
641 | cpu = raw_smp_processor_id(); | |
642 | ||
643 | if (table->ents[index] != cpu) | |
644 | table->ents[index] = cpu; | |
645 | } | |
646 | } | |
647 | ||
648 | static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, | |
649 | u32 hash) | |
650 | { | |
651 | if (table && hash) | |
652 | table->ents[hash & table->mask] = RPS_NO_CPU; | |
653 | } | |
654 | ||
6e3f7faf | 655 | extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; |
fec5e652 | 656 | |
c445477d BH |
657 | #ifdef CONFIG_RFS_ACCEL |
658 | extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, | |
659 | u32 flow_id, u16 filter_id); | |
660 | #endif | |
661 | ||
0a9627f2 TH |
662 | /* This structure contains an instance of an RX queue. */ |
663 | struct netdev_rx_queue { | |
6e3f7faf ED |
664 | struct rps_map __rcu *rps_map; |
665 | struct rps_dev_flow_table __rcu *rps_flow_table; | |
666 | struct kobject kobj; | |
fe822240 | 667 | struct net_device *dev; |
0a9627f2 | 668 | } ____cacheline_aligned_in_smp; |
fec5e652 | 669 | #endif /* CONFIG_RPS */ |
d314774c | 670 | |
bf264145 TH |
671 | #ifdef CONFIG_XPS |
672 | /* | |
673 | * This structure holds an XPS map which can be of variable length. The | |
674 | * map is an array of queues. | |
675 | */ | |
676 | struct xps_map { | |
677 | unsigned int len; | |
678 | unsigned int alloc_len; | |
679 | struct rcu_head rcu; | |
680 | u16 queues[0]; | |
681 | }; | |
682 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16))) | |
683 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ | |
684 | / sizeof(u16)) | |
685 | ||
686 | /* | |
687 | * This structure holds all XPS maps for device. Maps are indexed by CPU. | |
688 | */ | |
689 | struct xps_dev_maps { | |
690 | struct rcu_head rcu; | |
a4177869 | 691 | struct xps_map __rcu *cpu_map[0]; |
bf264145 TH |
692 | }; |
693 | #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ | |
694 | (nr_cpu_ids * sizeof(struct xps_map *))) | |
695 | #endif /* CONFIG_XPS */ | |
696 | ||
4f57c087 JF |
697 | #define TC_MAX_QUEUE 16 |
698 | #define TC_BITMASK 15 | |
699 | /* HW offloaded queuing disciplines txq count and offset maps */ | |
700 | struct netdev_tc_txq { | |
701 | u16 count; | |
702 | u16 offset; | |
703 | }; | |
704 | ||
d314774c SH |
705 | /* |
706 | * This structure defines the management hooks for network devices. | |
00829823 SH |
707 | * The following hooks can be defined; unless noted otherwise, they are |
708 | * optional and can be filled with a null pointer. | |
d314774c SH |
709 | * |
710 | * int (*ndo_init)(struct net_device *dev); | |
711 | * This function is called once when network device is registered. | |
712 | * The network device can use this to any late stage initializaton | |
713 | * or semantic validattion. It can fail with an error code which will | |
714 | * be propogated back to register_netdev | |
715 | * | |
716 | * void (*ndo_uninit)(struct net_device *dev); | |
717 | * This function is called when device is unregistered or when registration | |
718 | * fails. It is not called if init fails. | |
719 | * | |
720 | * int (*ndo_open)(struct net_device *dev); | |
721 | * This function is called when network device transistions to the up | |
722 | * state. | |
723 | * | |
724 | * int (*ndo_stop)(struct net_device *dev); | |
725 | * This function is called when network device transistions to the down | |
726 | * state. | |
727 | * | |
dc1f8bf6 SH |
728 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
729 | * struct net_device *dev); | |
00829823 | 730 | * Called when a packet needs to be transmitted. |
dc1f8bf6 SH |
731 | * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. |
732 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) | |
00829823 SH |
733 | * Required can not be NULL. |
734 | * | |
735 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); | |
736 | * Called to decide which queue to when device supports multiple | |
737 | * transmit queues. | |
738 | * | |
d314774c SH |
739 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); |
740 | * This function is called to allow device receiver to make | |
741 | * changes to configuration when multicast or promiscious is enabled. | |
742 | * | |
743 | * void (*ndo_set_rx_mode)(struct net_device *dev); | |
744 | * This function is called device changes address list filtering. | |
745 | * | |
746 | * void (*ndo_set_multicast_list)(struct net_device *dev); | |
747 | * This function is called when the multicast address list changes. | |
748 | * | |
749 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | |
750 | * This function is called when the Media Access Control address | |
37b607c5 | 751 | * needs to be changed. If this interface is not defined, the |
d314774c SH |
752 | * mac address can not be changed. |
753 | * | |
754 | * int (*ndo_validate_addr)(struct net_device *dev); | |
755 | * Test if Media Access Control address is valid for the device. | |
756 | * | |
757 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); | |
758 | * Called when a user request an ioctl which can't be handled by | |
759 | * the generic interface code. If not defined ioctl's return | |
760 | * not supported error code. | |
761 | * | |
762 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); | |
763 | * Used to set network devices bus interface parameters. This interface | |
764 | * is retained for legacy reason, new devices should use the bus | |
765 | * interface (PCI) for low level management. | |
766 | * | |
767 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | |
768 | * Called when a user wants to change the Maximum Transfer Unit | |
769 | * of a device. If not defined, any request to change MTU will | |
770 | * will return an error. | |
771 | * | |
00829823 | 772 | * void (*ndo_tx_timeout)(struct net_device *dev); |
d314774c SH |
773 | * Callback uses when the transmitter has not made any progress |
774 | * for dev->watchdog ticks. | |
775 | * | |
3cfde79c | 776 | * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, |
28172739 | 777 | * struct rtnl_link_stats64 *storage); |
d308e38f | 778 | * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
d314774c | 779 | * Called when a user wants to get the network device usage |
be1f3c2c | 780 | * statistics. Drivers must do one of the following: |
3cfde79c BH |
781 | * 1. Define @ndo_get_stats64 to fill in a zero-initialised |
782 | * rtnl_link_stats64 structure passed by the caller. | |
82695d9b | 783 | * 2. Define @ndo_get_stats to update a net_device_stats structure |
be1f3c2c BH |
784 | * (which should normally be dev->stats) and return a pointer to |
785 | * it. The structure may be changed asynchronously only if each | |
786 | * field is written atomically. | |
787 | * 3. Update dev->stats asynchronously and atomically, and define | |
788 | * neither operation. | |
d314774c SH |
789 | * |
790 | * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp); | |
68763c89 | 791 | * If device support VLAN receive acceleration |
d314774c SH |
792 | * (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called |
793 | * when vlan groups for the device changes. Note: grp is NULL | |
794 | * if no vlan's groups are being used. | |
795 | * | |
796 | * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); | |
797 | * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) | |
798 | * this function is called when a VLAN id is registered. | |
799 | * | |
800 | * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); | |
801 | * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) | |
802 | * this function is called when a VLAN id is unregistered. | |
803 | * | |
804 | * void (*ndo_poll_controller)(struct net_device *dev); | |
95c26df8 WM |
805 | * |
806 | * SR-IOV management functions. | |
807 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); | |
808 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); | |
809 | * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); | |
810 | * int (*ndo_get_vf_config)(struct net_device *dev, | |
811 | * int vf, struct ifla_vf_info *ivf); | |
57b61080 SF |
812 | * int (*ndo_set_vf_port)(struct net_device *dev, int vf, |
813 | * struct nlattr *port[]); | |
814 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); | |
4f57c087 JF |
815 | * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) |
816 | * Called to setup 'tc' number of traffic classes in the net device. This | |
817 | * is always called from the stack with the rtnl lock held and netif tx | |
818 | * queues stopped. This allows the netdevice to perform queue management | |
819 | * safely. | |
c445477d | 820 | * |
e9bce845 YZ |
821 | * Fiber Channel over Ethernet (FCoE) offload functions. |
822 | * int (*ndo_fcoe_enable)(struct net_device *dev); | |
823 | * Called when the FCoE protocol stack wants to start using LLD for FCoE | |
824 | * so the underlying device can perform whatever needed configuration or | |
825 | * initialization to support acceleration of FCoE traffic. | |
826 | * | |
827 | * int (*ndo_fcoe_disable)(struct net_device *dev); | |
828 | * Called when the FCoE protocol stack wants to stop using LLD for FCoE | |
829 | * so the underlying device can perform whatever needed clean-ups to | |
830 | * stop supporting acceleration of FCoE traffic. | |
831 | * | |
832 | * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, | |
833 | * struct scatterlist *sgl, unsigned int sgc); | |
834 | * Called when the FCoE Initiator wants to initialize an I/O that | |
835 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
836 | * perform necessary setup and returns 1 to indicate the device is set up | |
837 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
838 | * | |
839 | * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); | |
840 | * Called when the FCoE Initiator/Target is done with the DDPed I/O as | |
841 | * indicated by the FC exchange id 'xid', so the underlying device can | |
842 | * clean up and reuse resources for later DDP requests. | |
843 | * | |
844 | * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, | |
845 | * struct scatterlist *sgl, unsigned int sgc); | |
846 | * Called when the FCoE Target wants to initialize an I/O that | |
847 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
848 | * perform necessary setup and returns 1 to indicate the device is set up | |
849 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
850 | * | |
851 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); | |
852 | * Called when the underlying device wants to override default World Wide | |
853 | * Name (WWN) generation mechanism in FCoE protocol stack to pass its own | |
854 | * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE | |
855 | * protocol stack to use. | |
856 | * | |
c445477d BH |
857 | * RFS acceleration. |
858 | * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, | |
859 | * u16 rxq_index, u32 flow_id); | |
860 | * Set hardware filter for RFS. rxq_index is the target queue index; | |
861 | * flow_id is a flow ID to be passed to rps_may_expire_flow() later. | |
862 | * Return the filter ID on success, or a negative error code. | |
fbaec0ea JP |
863 | * |
864 | * Slave management functions (for bridge, bonding, etc). User should | |
865 | * call netdev_set_master() to set dev->master properly. | |
866 | * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); | |
867 | * Called to make another netdev an underling. | |
868 | * | |
869 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); | |
870 | * Called to release previously enslaved netdev. | |
5455c699 MM |
871 | * |
872 | * Feature/offload setting functions. | |
873 | * u32 (*ndo_fix_features)(struct net_device *dev, u32 features); | |
874 | * Adjusts the requested feature flags according to device-specific | |
875 | * constraints, and returns the resulting flags. Must not modify | |
876 | * the device state. | |
877 | * | |
878 | * int (*ndo_set_features)(struct net_device *dev, u32 features); | |
879 | * Called to update device configuration to new features. Passed | |
880 | * feature set might be less than what was returned by ndo_fix_features()). | |
881 | * Must return >0 or -errno if it changed dev->features itself. | |
882 | * | |
d314774c SH |
883 | */ |
884 | struct net_device_ops { | |
885 | int (*ndo_init)(struct net_device *dev); | |
886 | void (*ndo_uninit)(struct net_device *dev); | |
887 | int (*ndo_open)(struct net_device *dev); | |
888 | int (*ndo_stop)(struct net_device *dev); | |
dc1f8bf6 | 889 | netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, |
00829823 SH |
890 | struct net_device *dev); |
891 | u16 (*ndo_select_queue)(struct net_device *dev, | |
892 | struct sk_buff *skb); | |
d314774c SH |
893 | void (*ndo_change_rx_flags)(struct net_device *dev, |
894 | int flags); | |
d314774c | 895 | void (*ndo_set_rx_mode)(struct net_device *dev); |
d314774c | 896 | void (*ndo_set_multicast_list)(struct net_device *dev); |
d314774c SH |
897 | int (*ndo_set_mac_address)(struct net_device *dev, |
898 | void *addr); | |
d314774c | 899 | int (*ndo_validate_addr)(struct net_device *dev); |
d314774c SH |
900 | int (*ndo_do_ioctl)(struct net_device *dev, |
901 | struct ifreq *ifr, int cmd); | |
d314774c SH |
902 | int (*ndo_set_config)(struct net_device *dev, |
903 | struct ifmap *map); | |
00829823 SH |
904 | int (*ndo_change_mtu)(struct net_device *dev, |
905 | int new_mtu); | |
906 | int (*ndo_neigh_setup)(struct net_device *dev, | |
907 | struct neigh_parms *); | |
d314774c SH |
908 | void (*ndo_tx_timeout) (struct net_device *dev); |
909 | ||
28172739 ED |
910 | struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, |
911 | struct rtnl_link_stats64 *storage); | |
d314774c SH |
912 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
913 | ||
914 | void (*ndo_vlan_rx_register)(struct net_device *dev, | |
915 | struct vlan_group *grp); | |
916 | void (*ndo_vlan_rx_add_vid)(struct net_device *dev, | |
917 | unsigned short vid); | |
918 | void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, | |
919 | unsigned short vid); | |
920 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
d314774c | 921 | void (*ndo_poll_controller)(struct net_device *dev); |
4247e161 HX |
922 | int (*ndo_netpoll_setup)(struct net_device *dev, |
923 | struct netpoll_info *info); | |
0e34e931 | 924 | void (*ndo_netpoll_cleanup)(struct net_device *dev); |
d314774c | 925 | #endif |
95c26df8 WM |
926 | int (*ndo_set_vf_mac)(struct net_device *dev, |
927 | int queue, u8 *mac); | |
928 | int (*ndo_set_vf_vlan)(struct net_device *dev, | |
929 | int queue, u16 vlan, u8 qos); | |
930 | int (*ndo_set_vf_tx_rate)(struct net_device *dev, | |
931 | int vf, int rate); | |
932 | int (*ndo_get_vf_config)(struct net_device *dev, | |
933 | int vf, | |
934 | struct ifla_vf_info *ivf); | |
57b61080 SF |
935 | int (*ndo_set_vf_port)(struct net_device *dev, |
936 | int vf, | |
937 | struct nlattr *port[]); | |
938 | int (*ndo_get_vf_port)(struct net_device *dev, | |
939 | int vf, struct sk_buff *skb); | |
4f57c087 | 940 | int (*ndo_setup_tc)(struct net_device *dev, u8 tc); |
4d288d57 | 941 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
cb454399 YZ |
942 | int (*ndo_fcoe_enable)(struct net_device *dev); |
943 | int (*ndo_fcoe_disable)(struct net_device *dev); | |
4d288d57 YZ |
944 | int (*ndo_fcoe_ddp_setup)(struct net_device *dev, |
945 | u16 xid, | |
946 | struct scatterlist *sgl, | |
947 | unsigned int sgc); | |
948 | int (*ndo_fcoe_ddp_done)(struct net_device *dev, | |
949 | u16 xid); | |
6247e086 YZ |
950 | int (*ndo_fcoe_ddp_target)(struct net_device *dev, |
951 | u16 xid, | |
952 | struct scatterlist *sgl, | |
953 | unsigned int sgc); | |
df5c7945 YZ |
954 | #define NETDEV_FCOE_WWNN 0 |
955 | #define NETDEV_FCOE_WWPN 1 | |
956 | int (*ndo_fcoe_get_wwn)(struct net_device *dev, | |
957 | u64 *wwn, int type); | |
4d288d57 | 958 | #endif |
c445477d BH |
959 | #ifdef CONFIG_RFS_ACCEL |
960 | int (*ndo_rx_flow_steer)(struct net_device *dev, | |
961 | const struct sk_buff *skb, | |
962 | u16 rxq_index, | |
963 | u32 flow_id); | |
964 | #endif | |
fbaec0ea JP |
965 | int (*ndo_add_slave)(struct net_device *dev, |
966 | struct net_device *slave_dev); | |
967 | int (*ndo_del_slave)(struct net_device *dev, | |
968 | struct net_device *slave_dev); | |
5455c699 MM |
969 | u32 (*ndo_fix_features)(struct net_device *dev, |
970 | u32 features); | |
971 | int (*ndo_set_features)(struct net_device *dev, | |
972 | u32 features); | |
d314774c SH |
973 | }; |
974 | ||
1da177e4 LT |
975 | /* |
976 | * The DEVICE structure. | |
977 | * Actually, this whole structure is a big mistake. It mixes I/O | |
978 | * data with strictly "high-level" data, and it has to know about | |
979 | * almost every data structure used in the INET module. | |
980 | * | |
981 | * FIXME: cleanup struct net_device such that network protocol info | |
982 | * moves out. | |
983 | */ | |
984 | ||
d94d9fee | 985 | struct net_device { |
1da177e4 LT |
986 | |
987 | /* | |
988 | * This is the first field of the "visible" part of this structure | |
989 | * (i.e. as seen by users in the "Space.c" file). It is the name | |
724df615 | 990 | * of the interface. |
1da177e4 LT |
991 | */ |
992 | char name[IFNAMSIZ]; | |
ed77134b | 993 | |
82f68251 | 994 | struct pm_qos_request_list pm_qos_req; |
ed77134b | 995 | |
9356b8fc ED |
996 | /* device name hash chain */ |
997 | struct hlist_node name_hlist; | |
0b815a1a SH |
998 | /* snmp alias */ |
999 | char *ifalias; | |
1da177e4 LT |
1000 | |
1001 | /* | |
1002 | * I/O specific fields | |
1003 | * FIXME: Merge these and struct ifmap into one | |
1004 | */ | |
1005 | unsigned long mem_end; /* shared mem end */ | |
1006 | unsigned long mem_start; /* shared mem start */ | |
1007 | unsigned long base_addr; /* device I/O address */ | |
1008 | unsigned int irq; /* device IRQ number */ | |
1009 | ||
1010 | /* | |
1011 | * Some hardware also needs these fields, but they are not | |
1012 | * part of the usual set specified in Space.c. | |
1013 | */ | |
1014 | ||
1da177e4 LT |
1015 | unsigned long state; |
1016 | ||
7562f876 | 1017 | struct list_head dev_list; |
bea3348e | 1018 | struct list_head napi_list; |
44a0873d | 1019 | struct list_head unreg_list; |
1da177e4 | 1020 | |
5455c699 | 1021 | /* currently active device features */ |
04ed3e74 | 1022 | u32 features; |
5455c699 MM |
1023 | /* user-changeable features */ |
1024 | u32 hw_features; | |
1025 | /* user-requested features */ | |
1026 | u32 wanted_features; | |
1aac6267 | 1027 | /* mask of features inheritable by VLAN devices */ |
04ed3e74 MM |
1028 | u32 vlan_features; |
1029 | ||
5455c699 MM |
1030 | /* Net device feature bits; if you change something, |
1031 | * also update netdev_features_strings[] in ethtool.c */ | |
1032 | ||
9356b8fc | 1033 | #define NETIF_F_SG 1 /* Scatter/gather IO. */ |
d212f87b | 1034 | #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */ |
9356b8fc ED |
1035 | #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ |
1036 | #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ | |
d212f87b | 1037 | #define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */ |
9356b8fc ED |
1038 | #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ |
1039 | #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ | |
1040 | #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ | |
1041 | #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ | |
1042 | #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ | |
1043 | #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ | |
37c3185a | 1044 | #define NETIF_F_GSO 2048 /* Enable software GSO. */ |
e24eb521 CB |
1045 | #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */ |
1046 | /* do not use LLTX in new drivers */ | |
ce286d32 | 1047 | #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ |
d565b0a1 | 1048 | #define NETIF_F_GRO 16384 /* Generic receive offload */ |
3ae7c0b2 | 1049 | #define NETIF_F_LRO 32768 /* large receive offload */ |
7967168c | 1050 | |
8dc92f7e | 1051 | /* the GSO_MASK reserves bits 16 through 23 */ |
01d5b2fc | 1052 | #define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */ |
8dc92f7e | 1053 | #define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */ |
bb2af4f5 | 1054 | #define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/ |
15682bc4 | 1055 | #define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */ |
b00fabb4 | 1056 | #define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */ |
e83d360d | 1057 | #define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */ |
c6e1a0d1 | 1058 | #define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */ |
eed2a12f | 1059 | #define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */ |
01d5b2fc | 1060 | |
7967168c | 1061 | /* Segmentation offload features */ |
289c79a4 | 1062 | #define NETIF_F_GSO_SHIFT 16 |
43eb99c5 | 1063 | #define NETIF_F_GSO_MASK 0x00ff0000 |
7967168c | 1064 | #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) |
f83ef8c0 | 1065 | #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) |
576a30eb | 1066 | #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) |
f83ef8c0 HX |
1067 | #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) |
1068 | #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) | |
01d5b2fc | 1069 | #define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) |
9356b8fc | 1070 | |
5455c699 MM |
1071 | /* Features valid for ethtool to change */ |
1072 | /* = all defined minus driver/device-class-related */ | |
fa2bd7ff | 1073 | #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ |
5455c699 | 1074 | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) |
eed2a12f | 1075 | #define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE) |
5455c699 | 1076 | |
78eb8877 | 1077 | /* List of features with software fallbacks. */ |
d29c0c5c HX |
1078 | #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ |
1079 | NETIF_F_TSO6 | NETIF_F_UFO) | |
78eb8877 | 1080 | |
d212f87b | 1081 | |
8648b305 | 1082 | #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) |
d212f87b SH |
1083 | #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) |
1084 | #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) | |
1085 | #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) | |
8648b305 | 1086 | |
0a417704 MM |
1087 | #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) |
1088 | ||
7f267051 YZ |
1089 | #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ |
1090 | NETIF_F_FSO) | |
1091 | ||
0a417704 MM |
1092 | #define NETIF_F_ALL_TX_OFFLOADS (NETIF_F_ALL_CSUM | NETIF_F_SG | \ |
1093 | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ | |
fa2bd7ff | 1094 | NETIF_F_HIGHDMA | \ |
7f267051 YZ |
1095 | NETIF_F_SCTP_CSUM | \ |
1096 | NETIF_F_ALL_FCOE) | |
0a417704 | 1097 | |
b63365a2 HX |
1098 | /* |
1099 | * If one device supports one of these features, then enable them | |
1100 | * for all in netdev_increment_features. | |
1101 | */ | |
1102 | #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ | |
d314774c | 1103 | NETIF_F_SG | NETIF_F_HIGHDMA | \ |
1742f183 MM |
1104 | NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) |
1105 | /* | |
1106 | * If one device doesn't support one of these features, then disable it | |
1107 | * for all in netdev_increment_features. | |
1108 | */ | |
1109 | #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) | |
b63365a2 | 1110 | |
212b573f MM |
1111 | /* changeable features with no special hardware requirements */ |
1112 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) | |
1113 | ||
1da177e4 LT |
1114 | /* Interface index. Unique device identifier */ |
1115 | int ifindex; | |
1116 | int iflink; | |
1117 | ||
c45d286e | 1118 | struct net_device_stats stats; |
caf586e5 ED |
1119 | atomic_long_t rx_dropped; /* dropped packets by core network |
1120 | * Do not use this in drivers. | |
1121 | */ | |
1da177e4 | 1122 | |
b86e0280 | 1123 | #ifdef CONFIG_WIRELESS_EXT |
1da177e4 LT |
1124 | /* List of functions to handle Wireless Extensions (instead of ioctl). |
1125 | * See <net/iw_handler.h> for details. Jean II */ | |
1126 | const struct iw_handler_def * wireless_handlers; | |
1127 | /* Instance data managed by the core of Wireless Extensions. */ | |
1128 | struct iw_public_data * wireless_data; | |
b86e0280 | 1129 | #endif |
d314774c SH |
1130 | /* Management operations */ |
1131 | const struct net_device_ops *netdev_ops; | |
76fd8593 | 1132 | const struct ethtool_ops *ethtool_ops; |
1da177e4 | 1133 | |
3b04ddde SH |
1134 | /* Hardware header description */ |
1135 | const struct header_ops *header_ops; | |
1136 | ||
b00055aa | 1137 | unsigned int flags; /* interface flags (a la BSD) */ |
bdc220da | 1138 | unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */ |
1da177e4 | 1139 | unsigned short gflags; |
1da177e4 LT |
1140 | unsigned short padded; /* How much padding added by alloc_netdev() */ |
1141 | ||
b00055aa SR |
1142 | unsigned char operstate; /* RFC2863 operstate */ |
1143 | unsigned char link_mode; /* mapping policy to operstate */ | |
1144 | ||
bdc220da JP |
1145 | unsigned char if_port; /* Selectable AUI, TP,..*/ |
1146 | unsigned char dma; /* DMA channel */ | |
1147 | ||
cd7b5396 | 1148 | unsigned int mtu; /* interface MTU value */ |
1da177e4 LT |
1149 | unsigned short type; /* interface hardware type */ |
1150 | unsigned short hard_header_len; /* hardware hdr length */ | |
1da177e4 | 1151 | |
f5184d26 JB |
1152 | /* extra head- and tailroom the hardware may need, but not in all cases |
1153 | * can this be guaranteed, especially tailroom. Some cases also use | |
1154 | * LL_MAX_HEADER instead to allocate the skb. | |
1155 | */ | |
1156 | unsigned short needed_headroom; | |
1157 | unsigned short needed_tailroom; | |
1158 | ||
1da177e4 | 1159 | /* Interface address info. */ |
a6f9a705 | 1160 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ |
c1f79426 | 1161 | unsigned char addr_assign_type; /* hw address assignment type */ |
1da177e4 LT |
1162 | unsigned char addr_len; /* hardware address length */ |
1163 | unsigned short dev_id; /* for shared network cards */ | |
1164 | ||
ccffad25 | 1165 | spinlock_t addr_list_lock; |
22bedad3 JP |
1166 | struct netdev_hw_addr_list uc; /* Unicast mac addresses */ |
1167 | struct netdev_hw_addr_list mc; /* Multicast mac addresses */ | |
1168 | int uc_promisc; | |
9d45abe1 WC |
1169 | unsigned int promiscuity; |
1170 | unsigned int allmulti; | |
1da177e4 | 1171 | |
1da177e4 LT |
1172 | |
1173 | /* Protocol specific pointers */ | |
65ac6a5f JG |
1174 | |
1175 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | |
b616b09a | 1176 | struct vlan_group __rcu *vlgrp; /* VLAN group */ |
65ac6a5f | 1177 | #endif |
91da11f8 LB |
1178 | #ifdef CONFIG_NET_DSA |
1179 | void *dsa_ptr; /* dsa specific data */ | |
1180 | #endif | |
1da177e4 | 1181 | void *atalk_ptr; /* AppleTalk link */ |
95ae6b22 | 1182 | struct in_device __rcu *ip_ptr; /* IPv4 specific data */ |
fc766e4c | 1183 | struct dn_dev __rcu *dn_ptr; /* DECnet specific data */ |
198caeca | 1184 | struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ |
1da177e4 LT |
1185 | void *ec_ptr; /* Econet specific data */ |
1186 | void *ax25_ptr; /* AX.25 specific data */ | |
704232c2 JB |
1187 | struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, |
1188 | assign before registering */ | |
1da177e4 | 1189 | |
9356b8fc | 1190 | /* |
cd13539b | 1191 | * Cache lines mostly used on receive path (including eth_type_trans()) |
9356b8fc | 1192 | */ |
4dc89133 ED |
1193 | unsigned long last_rx; /* Time of last Rx |
1194 | * This should not be set in | |
1195 | * drivers, unless really needed, | |
1196 | * because network stack (bonding) | |
1197 | * use it if/when necessary, to | |
1198 | * avoid dirtying this cache line. | |
1199 | */ | |
1200 | ||
cd13539b ED |
1201 | struct net_device *master; /* Pointer to master device of a group, |
1202 | * which this device is member of. | |
1203 | */ | |
1204 | ||
9356b8fc | 1205 | /* Interface address info used in eth_type_trans() */ |
f001fde5 JP |
1206 | unsigned char *dev_addr; /* hw address, (before bcast |
1207 | because most packets are | |
1208 | unicast) */ | |
1209 | ||
31278e71 JP |
1210 | struct netdev_hw_addr_list dev_addrs; /* list of device |
1211 | hw addresses */ | |
9356b8fc ED |
1212 | |
1213 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ | |
1da177e4 | 1214 | |
df334545 | 1215 | #ifdef CONFIG_RPS |
0a9627f2 TH |
1216 | struct kset *queues_kset; |
1217 | ||
1218 | struct netdev_rx_queue *_rx; | |
1219 | ||
62fe0b40 | 1220 | /* Number of RX queues allocated at register_netdev() time */ |
0a9627f2 | 1221 | unsigned int num_rx_queues; |
62fe0b40 BH |
1222 | |
1223 | /* Number of RX queues currently active in device */ | |
1224 | unsigned int real_num_rx_queues; | |
c445477d BH |
1225 | |
1226 | #ifdef CONFIG_RFS_ACCEL | |
1227 | /* CPU reverse-mapping for RX completion interrupts, indexed | |
1228 | * by RX queue number. Assigned by driver. This must only be | |
1229 | * set if the ndo_rx_flow_steer operation is defined. */ | |
1230 | struct cpu_rmap *rx_cpu_rmap; | |
1231 | #endif | |
df334545 | 1232 | #endif |
0a9627f2 | 1233 | |
61391cde | 1234 | rx_handler_func_t __rcu *rx_handler; |
1235 | void __rcu *rx_handler_data; | |
e8a0464c | 1236 | |
24824a09 | 1237 | struct netdev_queue __rcu *ingress_queue; |
cd13539b ED |
1238 | |
1239 | /* | |
1240 | * Cache lines mostly used on transmit path | |
1241 | */ | |
e8a0464c | 1242 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
fd2ea0a7 DM |
1243 | |
1244 | /* Number of TX queues allocated at alloc_netdev_mq() time */ | |
e8a0464c | 1245 | unsigned int num_tx_queues; |
fd2ea0a7 DM |
1246 | |
1247 | /* Number of TX queues currently active in device */ | |
1248 | unsigned int real_num_tx_queues; | |
1249 | ||
af356afa PM |
1250 | /* root qdisc from userspace point of view */ |
1251 | struct Qdisc *qdisc; | |
1252 | ||
1da177e4 | 1253 | unsigned long tx_queue_len; /* Max frames per queue allowed */ |
c3f26a26 | 1254 | spinlock_t tx_global_lock; |
cd13539b | 1255 | |
bf264145 | 1256 | #ifdef CONFIG_XPS |
a4177869 | 1257 | struct xps_dev_maps __rcu *xps_maps; |
bf264145 | 1258 | #endif |
1d24eb48 | 1259 | |
9356b8fc | 1260 | /* These may be needed for future network-power-down code. */ |
9d21493b ED |
1261 | |
1262 | /* | |
1263 | * trans_start here is expensive for high speed devices on SMP, | |
1264 | * please use netdev_queue->trans_start instead. | |
1265 | */ | |
9356b8fc ED |
1266 | unsigned long trans_start; /* Time (in jiffies) of last Tx */ |
1267 | ||
1268 | int watchdog_timeo; /* used by dev_watchdog() */ | |
1269 | struct timer_list watchdog_timer; | |
1270 | ||
1da177e4 | 1271 | /* Number of references to this device */ |
29b4433d | 1272 | int __percpu *pcpu_refcnt; |
9356b8fc | 1273 | |
1da177e4 LT |
1274 | /* delayed register/unregister */ |
1275 | struct list_head todo_list; | |
1da177e4 LT |
1276 | /* device index hash chain */ |
1277 | struct hlist_node index_hlist; | |
1278 | ||
e014debe | 1279 | struct list_head link_watch_list; |
572a103d | 1280 | |
1da177e4 LT |
1281 | /* register/unregister state machine */ |
1282 | enum { NETREG_UNINITIALIZED=0, | |
b17a7c17 | 1283 | NETREG_REGISTERED, /* completed register_netdevice */ |
1da177e4 LT |
1284 | NETREG_UNREGISTERING, /* called unregister_netdevice */ |
1285 | NETREG_UNREGISTERED, /* completed unregister todo */ | |
1286 | NETREG_RELEASED, /* called free_netdev */ | |
937f1ba5 | 1287 | NETREG_DUMMY, /* dummy device for NAPI poll */ |
449f4544 ED |
1288 | } reg_state:8; |
1289 | ||
1290 | bool dismantle; /* device is going do be freed */ | |
a2835763 PM |
1291 | |
1292 | enum { | |
1293 | RTNL_LINK_INITIALIZED, | |
1294 | RTNL_LINK_INITIALIZING, | |
1295 | } rtnl_link_state:16; | |
1da177e4 | 1296 | |
d314774c SH |
1297 | /* Called from unregister, can be used to call free_netdev */ |
1298 | void (*destructor)(struct net_device *dev); | |
1da177e4 | 1299 | |
1da177e4 | 1300 | #ifdef CONFIG_NETPOLL |
115c1d6e | 1301 | struct netpoll_info *npinfo; |
1da177e4 | 1302 | #endif |
eae792b7 | 1303 | |
c346dca1 | 1304 | #ifdef CONFIG_NET_NS |
4a1c5371 EB |
1305 | /* Network namespace this network device is inside */ |
1306 | struct net *nd_net; | |
c346dca1 | 1307 | #endif |
4a1c5371 | 1308 | |
4951704b | 1309 | /* mid-layer private */ |
a7855c78 ED |
1310 | union { |
1311 | void *ml_priv; | |
1312 | struct pcpu_lstats __percpu *lstats; /* loopback stats */ | |
290b895e | 1313 | struct pcpu_tstats __percpu *tstats; /* tunnel stats */ |
6d81f41c | 1314 | struct pcpu_dstats __percpu *dstats; /* dummy stats */ |
a7855c78 | 1315 | }; |
eca9ebac | 1316 | /* GARP */ |
3cc77ec7 | 1317 | struct garp_port __rcu *garp_port; |
1da177e4 | 1318 | |
1da177e4 | 1319 | /* class/net/name entry */ |
43cb76d9 | 1320 | struct device dev; |
0c509a6c EB |
1321 | /* space for optional device, statistics, and wireless sysfs groups */ |
1322 | const struct attribute_group *sysfs_groups[4]; | |
38f7b870 PM |
1323 | |
1324 | /* rtnetlink link ops */ | |
1325 | const struct rtnl_link_ops *rtnl_link_ops; | |
f25f4e44 | 1326 | |
82cc1a7a PWJ |
1327 | /* for setting kernel sock attribute on TCP connection setup */ |
1328 | #define GSO_MAX_SIZE 65536 | |
1329 | unsigned int gso_max_size; | |
d314774c | 1330 | |
7a6b6f51 | 1331 | #ifdef CONFIG_DCB |
2f90b865 | 1332 | /* Data Center Bridging netlink ops */ |
32953543 | 1333 | const struct dcbnl_rtnl_ops *dcbnl_ops; |
2f90b865 | 1334 | #endif |
4f57c087 JF |
1335 | u8 num_tc; |
1336 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; | |
1337 | u8 prio_tc_map[TC_BITMASK + 1]; | |
2f90b865 | 1338 | |
4d288d57 YZ |
1339 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
1340 | /* max exchange id for FCoE LRO by ddp */ | |
1341 | unsigned int fcoe_ddp_xid; | |
1342 | #endif | |
c1f19b51 RC |
1343 | /* phy device may attach itself for hardware timestamping */ |
1344 | struct phy_device *phydev; | |
cbda10fa VD |
1345 | |
1346 | /* group the device belongs to */ | |
1347 | int group; | |
1da177e4 | 1348 | }; |
43cb76d9 | 1349 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
1da177e4 LT |
1350 | |
1351 | #define NETDEV_ALIGN 32 | |
1da177e4 | 1352 | |
4f57c087 JF |
1353 | static inline |
1354 | int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) | |
1355 | { | |
1356 | return dev->prio_tc_map[prio & TC_BITMASK]; | |
1357 | } | |
1358 | ||
1359 | static inline | |
1360 | int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) | |
1361 | { | |
1362 | if (tc >= dev->num_tc) | |
1363 | return -EINVAL; | |
1364 | ||
1365 | dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; | |
1366 | return 0; | |
1367 | } | |
1368 | ||
1369 | static inline | |
1370 | void netdev_reset_tc(struct net_device *dev) | |
1371 | { | |
1372 | dev->num_tc = 0; | |
1373 | memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); | |
1374 | memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); | |
1375 | } | |
1376 | ||
1377 | static inline | |
1378 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) | |
1379 | { | |
1380 | if (tc >= dev->num_tc) | |
1381 | return -EINVAL; | |
1382 | ||
1383 | dev->tc_to_txq[tc].count = count; | |
1384 | dev->tc_to_txq[tc].offset = offset; | |
1385 | return 0; | |
1386 | } | |
1387 | ||
1388 | static inline | |
1389 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc) | |
1390 | { | |
1391 | if (num_tc > TC_MAX_QUEUE) | |
1392 | return -EINVAL; | |
1393 | ||
1394 | dev->num_tc = num_tc; | |
1395 | return 0; | |
1396 | } | |
1397 | ||
1398 | static inline | |
1399 | int netdev_get_num_tc(struct net_device *dev) | |
1400 | { | |
1401 | return dev->num_tc; | |
1402 | } | |
1403 | ||
e8a0464c DM |
1404 | static inline |
1405 | struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, | |
1406 | unsigned int index) | |
1407 | { | |
1408 | return &dev->_tx[index]; | |
1409 | } | |
1410 | ||
1411 | static inline void netdev_for_each_tx_queue(struct net_device *dev, | |
1412 | void (*f)(struct net_device *, | |
1413 | struct netdev_queue *, | |
1414 | void *), | |
1415 | void *arg) | |
1416 | { | |
1417 | unsigned int i; | |
1418 | ||
1419 | for (i = 0; i < dev->num_tx_queues; i++) | |
1420 | f(dev, &dev->_tx[i], arg); | |
1421 | } | |
1422 | ||
c346dca1 YH |
1423 | /* |
1424 | * Net namespace inlines | |
1425 | */ | |
1426 | static inline | |
1427 | struct net *dev_net(const struct net_device *dev) | |
1428 | { | |
c2d9ba9b | 1429 | return read_pnet(&dev->nd_net); |
c346dca1 YH |
1430 | } |
1431 | ||
1432 | static inline | |
f5aa23fd | 1433 | void dev_net_set(struct net_device *dev, struct net *net) |
c346dca1 YH |
1434 | { |
1435 | #ifdef CONFIG_NET_NS | |
f3005d7f DL |
1436 | release_net(dev->nd_net); |
1437 | dev->nd_net = hold_net(net); | |
c346dca1 YH |
1438 | #endif |
1439 | } | |
1440 | ||
cf85d08f LB |
1441 | static inline bool netdev_uses_dsa_tags(struct net_device *dev) |
1442 | { | |
1443 | #ifdef CONFIG_NET_DSA_TAG_DSA | |
1444 | if (dev->dsa_ptr != NULL) | |
1445 | return dsa_uses_dsa_tags(dev->dsa_ptr); | |
1446 | #endif | |
1447 | ||
1448 | return 0; | |
1449 | } | |
1450 | ||
8a83a00b AB |
1451 | #ifndef CONFIG_NET_NS |
1452 | static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev) | |
1453 | { | |
1454 | skb->dev = dev; | |
1455 | } | |
1456 | #else /* CONFIG_NET_NS */ | |
1457 | void skb_set_dev(struct sk_buff *skb, struct net_device *dev); | |
1458 | #endif | |
1459 | ||
396138f0 LB |
1460 | static inline bool netdev_uses_trailer_tags(struct net_device *dev) |
1461 | { | |
1462 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | |
1463 | if (dev->dsa_ptr != NULL) | |
1464 | return dsa_uses_trailer_tags(dev->dsa_ptr); | |
1465 | #endif | |
1466 | ||
1467 | return 0; | |
1468 | } | |
1469 | ||
bea3348e SH |
1470 | /** |
1471 | * netdev_priv - access network device private data | |
1472 | * @dev: network device | |
1473 | * | |
1474 | * Get network device private data | |
1475 | */ | |
6472ce60 | 1476 | static inline void *netdev_priv(const struct net_device *dev) |
1da177e4 | 1477 | { |
1ce8e7b5 | 1478 | return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); |
1da177e4 LT |
1479 | } |
1480 | ||
1da177e4 LT |
1481 | /* Set the sysfs physical device reference for the network logical device |
1482 | * if set prior to registration will cause a symlink during initialization. | |
1483 | */ | |
43cb76d9 | 1484 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) |
1da177e4 | 1485 | |
384912ed MH |
1486 | /* Set the sysfs device type for the network logical device to allow |
1487 | * fin grained indentification of different network device types. For | |
1488 | * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. | |
1489 | */ | |
1490 | #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) | |
1491 | ||
3b582cc1 SH |
1492 | /** |
1493 | * netif_napi_add - initialize a napi context | |
1494 | * @dev: network device | |
1495 | * @napi: napi context | |
1496 | * @poll: polling function | |
1497 | * @weight: default weight | |
1498 | * | |
1499 | * netif_napi_add() must be used to initialize a napi context prior to calling | |
1500 | * *any* of the other napi related functions. | |
1501 | */ | |
d565b0a1 HX |
1502 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, |
1503 | int (*poll)(struct napi_struct *, int), int weight); | |
bea3348e | 1504 | |
d8156534 AD |
1505 | /** |
1506 | * netif_napi_del - remove a napi context | |
1507 | * @napi: napi context | |
1508 | * | |
1509 | * netif_napi_del() removes a napi context from the network device napi list | |
1510 | */ | |
d565b0a1 HX |
1511 | void netif_napi_del(struct napi_struct *napi); |
1512 | ||
1513 | struct napi_gro_cb { | |
78a478d0 HX |
1514 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ |
1515 | void *frag0; | |
1516 | ||
7489594c HX |
1517 | /* Length of frag0. */ |
1518 | unsigned int frag0_len; | |
1519 | ||
86911732 HX |
1520 | /* This indicates where we are processing relative to skb->data. */ |
1521 | int data_offset; | |
1522 | ||
d565b0a1 HX |
1523 | /* This is non-zero if the packet may be of the same flow. */ |
1524 | int same_flow; | |
1525 | ||
1526 | /* This is non-zero if the packet cannot be merged with the new skb. */ | |
1527 | int flush; | |
1528 | ||
1529 | /* Number of segments aggregated. */ | |
1530 | int count; | |
5d38a079 HX |
1531 | |
1532 | /* Free the skb? */ | |
1533 | int free; | |
d565b0a1 HX |
1534 | }; |
1535 | ||
1536 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) | |
d8156534 | 1537 | |
1da177e4 | 1538 | struct packet_type { |
f2ccd8fa DM |
1539 | __be16 type; /* This is really htons(ether_type). */ |
1540 | struct net_device *dev; /* NULL is wildcarded here */ | |
1541 | int (*func) (struct sk_buff *, | |
1542 | struct net_device *, | |
1543 | struct packet_type *, | |
1544 | struct net_device *); | |
576a30eb | 1545 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
04ed3e74 | 1546 | u32 features); |
a430a43d | 1547 | int (*gso_send_check)(struct sk_buff *skb); |
d565b0a1 HX |
1548 | struct sk_buff **(*gro_receive)(struct sk_buff **head, |
1549 | struct sk_buff *skb); | |
1550 | int (*gro_complete)(struct sk_buff *skb); | |
1da177e4 LT |
1551 | void *af_packet_priv; |
1552 | struct list_head list; | |
1553 | }; | |
1554 | ||
1da177e4 LT |
1555 | #include <linux/notifier.h> |
1556 | ||
1da177e4 LT |
1557 | extern rwlock_t dev_base_lock; /* Device list lock */ |
1558 | ||
7562f876 | 1559 | |
881d966b EB |
1560 | #define for_each_netdev(net, d) \ |
1561 | list_for_each_entry(d, &(net)->dev_base_head, dev_list) | |
dcbccbd4 EB |
1562 | #define for_each_netdev_reverse(net, d) \ |
1563 | list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) | |
c6d14c84 ED |
1564 | #define for_each_netdev_rcu(net, d) \ |
1565 | list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) | |
881d966b EB |
1566 | #define for_each_netdev_safe(net, d, n) \ |
1567 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) | |
1568 | #define for_each_netdev_continue(net, d) \ | |
1569 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) | |
254245d2 | 1570 | #define for_each_netdev_continue_rcu(net, d) \ |
1571 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) | |
881d966b | 1572 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) |
7562f876 | 1573 | |
a050c33f DL |
1574 | static inline struct net_device *next_net_device(struct net_device *dev) |
1575 | { | |
1576 | struct list_head *lh; | |
1577 | struct net *net; | |
1578 | ||
c346dca1 | 1579 | net = dev_net(dev); |
a050c33f DL |
1580 | lh = dev->dev_list.next; |
1581 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
1582 | } | |
1583 | ||
ce81b76a ED |
1584 | static inline struct net_device *next_net_device_rcu(struct net_device *dev) |
1585 | { | |
1586 | struct list_head *lh; | |
1587 | struct net *net; | |
1588 | ||
1589 | net = dev_net(dev); | |
ccf43438 | 1590 | lh = rcu_dereference(list_next_rcu(&dev->dev_list)); |
ce81b76a ED |
1591 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
1592 | } | |
1593 | ||
a050c33f DL |
1594 | static inline struct net_device *first_net_device(struct net *net) |
1595 | { | |
1596 | return list_empty(&net->dev_base_head) ? NULL : | |
1597 | net_device_entry(net->dev_base_head.next); | |
1598 | } | |
7562f876 | 1599 | |
ccf43438 ED |
1600 | static inline struct net_device *first_net_device_rcu(struct net *net) |
1601 | { | |
1602 | struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); | |
1603 | ||
1604 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
1605 | } | |
1606 | ||
1da177e4 LT |
1607 | extern int netdev_boot_setup_check(struct net_device *dev); |
1608 | extern unsigned long netdev_boot_base(const char *prefix, int unit); | |
941666c2 ED |
1609 | extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, |
1610 | const char *hwaddr); | |
881d966b EB |
1611 | extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); |
1612 | extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
1da177e4 LT |
1613 | extern void dev_add_pack(struct packet_type *pt); |
1614 | extern void dev_remove_pack(struct packet_type *pt); | |
1615 | extern void __dev_remove_pack(struct packet_type *pt); | |
1616 | ||
bb69ae04 ED |
1617 | extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, |
1618 | unsigned short mask); | |
881d966b | 1619 | extern struct net_device *dev_get_by_name(struct net *net, const char *name); |
72c9528b | 1620 | extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); |
881d966b | 1621 | extern struct net_device *__dev_get_by_name(struct net *net, const char *name); |
1da177e4 LT |
1622 | extern int dev_alloc_name(struct net_device *dev, const char *name); |
1623 | extern int dev_open(struct net_device *dev); | |
1624 | extern int dev_close(struct net_device *dev); | |
0187bdfb | 1625 | extern void dev_disable_lro(struct net_device *dev); |
1da177e4 LT |
1626 | extern int dev_queue_xmit(struct sk_buff *skb); |
1627 | extern int register_netdevice(struct net_device *dev); | |
44a0873d ED |
1628 | extern void unregister_netdevice_queue(struct net_device *dev, |
1629 | struct list_head *head); | |
9b5e383c | 1630 | extern void unregister_netdevice_many(struct list_head *head); |
44a0873d ED |
1631 | static inline void unregister_netdevice(struct net_device *dev) |
1632 | { | |
1633 | unregister_netdevice_queue(dev, NULL); | |
1634 | } | |
1635 | ||
29b4433d | 1636 | extern int netdev_refcnt_read(const struct net_device *dev); |
1da177e4 LT |
1637 | extern void free_netdev(struct net_device *dev); |
1638 | extern void synchronize_net(void); | |
1639 | extern int register_netdevice_notifier(struct notifier_block *nb); | |
1640 | extern int unregister_netdevice_notifier(struct notifier_block *nb); | |
937f1ba5 | 1641 | extern int init_dummy_netdev(struct net_device *dev); |
9d40bbda | 1642 | extern void netdev_resync_ops(struct net_device *dev); |
937f1ba5 | 1643 | |
ad7379d4 | 1644 | extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); |
881d966b EB |
1645 | extern struct net_device *dev_get_by_index(struct net *net, int ifindex); |
1646 | extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); | |
fb699dfd | 1647 | extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); |
1da177e4 LT |
1648 | extern int dev_restart(struct net_device *dev); |
1649 | #ifdef CONFIG_NETPOLL_TRAP | |
1650 | extern int netpoll_trap(void); | |
1651 | #endif | |
86911732 HX |
1652 | extern int skb_gro_receive(struct sk_buff **head, |
1653 | struct sk_buff *skb); | |
78a478d0 | 1654 | extern void skb_gro_reset_offset(struct sk_buff *skb); |
86911732 HX |
1655 | |
1656 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | |
1657 | { | |
1658 | return NAPI_GRO_CB(skb)->data_offset; | |
1659 | } | |
1660 | ||
1661 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | |
1662 | { | |
1663 | return skb->len - NAPI_GRO_CB(skb)->data_offset; | |
1664 | } | |
1665 | ||
1666 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | |
1667 | { | |
1668 | NAPI_GRO_CB(skb)->data_offset += len; | |
1669 | } | |
1670 | ||
a5b1cf28 HX |
1671 | static inline void *skb_gro_header_fast(struct sk_buff *skb, |
1672 | unsigned int offset) | |
86911732 | 1673 | { |
a5b1cf28 HX |
1674 | return NAPI_GRO_CB(skb)->frag0 + offset; |
1675 | } | |
78a478d0 | 1676 | |
a5b1cf28 HX |
1677 | static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) |
1678 | { | |
1679 | return NAPI_GRO_CB(skb)->frag0_len < hlen; | |
1680 | } | |
78a478d0 | 1681 | |
a5b1cf28 HX |
1682 | static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, |
1683 | unsigned int offset) | |
1684 | { | |
1685 | NAPI_GRO_CB(skb)->frag0 = NULL; | |
1686 | NAPI_GRO_CB(skb)->frag0_len = 0; | |
1687 | return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL; | |
86911732 | 1688 | } |
1da177e4 | 1689 | |
aa4b9f53 HX |
1690 | static inline void *skb_gro_mac_header(struct sk_buff *skb) |
1691 | { | |
78d3fd0b | 1692 | return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); |
aa4b9f53 HX |
1693 | } |
1694 | ||
36e7b1b8 HX |
1695 | static inline void *skb_gro_network_header(struct sk_buff *skb) |
1696 | { | |
78d3fd0b HX |
1697 | return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + |
1698 | skb_network_offset(skb); | |
36e7b1b8 HX |
1699 | } |
1700 | ||
0c4e8581 SH |
1701 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
1702 | unsigned short type, | |
3b04ddde SH |
1703 | const void *daddr, const void *saddr, |
1704 | unsigned len) | |
0c4e8581 | 1705 | { |
f1ecfd5d | 1706 | if (!dev->header_ops || !dev->header_ops->create) |
0c4e8581 | 1707 | return 0; |
3b04ddde SH |
1708 | |
1709 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | |
0c4e8581 SH |
1710 | } |
1711 | ||
b95cce35 SH |
1712 | static inline int dev_parse_header(const struct sk_buff *skb, |
1713 | unsigned char *haddr) | |
1714 | { | |
1715 | const struct net_device *dev = skb->dev; | |
1716 | ||
1b83336b | 1717 | if (!dev->header_ops || !dev->header_ops->parse) |
b95cce35 | 1718 | return 0; |
3b04ddde | 1719 | return dev->header_ops->parse(skb, haddr); |
b95cce35 SH |
1720 | } |
1721 | ||
1da177e4 LT |
1722 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); |
1723 | extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); | |
1724 | static inline int unregister_gifconf(unsigned int family) | |
1725 | { | |
1726 | return register_gifconf(family, NULL); | |
1727 | } | |
1728 | ||
1729 | /* | |
88751275 | 1730 | * Incoming packets are placed on per-cpu queues |
1da177e4 | 1731 | */ |
d94d9fee | 1732 | struct softnet_data { |
37437bb2 | 1733 | struct Qdisc *output_queue; |
a9cbd588 | 1734 | struct Qdisc **output_queue_tailp; |
1da177e4 | 1735 | struct list_head poll_list; |
1da177e4 | 1736 | struct sk_buff *completion_queue; |
6e7676c1 | 1737 | struct sk_buff_head process_queue; |
1da177e4 | 1738 | |
dee42870 | 1739 | /* stats */ |
cd7b5396 DM |
1740 | unsigned int processed; |
1741 | unsigned int time_squeeze; | |
1742 | unsigned int cpu_collision; | |
1743 | unsigned int received_rps; | |
dee42870 | 1744 | |
fd793d89 | 1745 | #ifdef CONFIG_RPS |
88751275 ED |
1746 | struct softnet_data *rps_ipi_list; |
1747 | ||
1748 | /* Elements below can be accessed between CPUs for RPS */ | |
0a9627f2 | 1749 | struct call_single_data csd ____cacheline_aligned_in_smp; |
88751275 ED |
1750 | struct softnet_data *rps_ipi_next; |
1751 | unsigned int cpu; | |
fec5e652 | 1752 | unsigned int input_queue_head; |
76cc8b13 | 1753 | unsigned int input_queue_tail; |
1e94d72f | 1754 | #endif |
dee42870 | 1755 | unsigned dropped; |
0a9627f2 | 1756 | struct sk_buff_head input_pkt_queue; |
bea3348e | 1757 | struct napi_struct backlog; |
1da177e4 LT |
1758 | }; |
1759 | ||
76cc8b13 | 1760 | static inline void input_queue_head_incr(struct softnet_data *sd) |
fec5e652 TH |
1761 | { |
1762 | #ifdef CONFIG_RPS | |
76cc8b13 TH |
1763 | sd->input_queue_head++; |
1764 | #endif | |
1765 | } | |
1766 | ||
1767 | static inline void input_queue_tail_incr_save(struct softnet_data *sd, | |
1768 | unsigned int *qtail) | |
1769 | { | |
1770 | #ifdef CONFIG_RPS | |
1771 | *qtail = ++sd->input_queue_tail; | |
fec5e652 TH |
1772 | #endif |
1773 | } | |
1774 | ||
0a9627f2 | 1775 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
1da177e4 | 1776 | |
37437bb2 | 1777 | extern void __netif_schedule(struct Qdisc *q); |
1da177e4 | 1778 | |
86d804e1 | 1779 | static inline void netif_schedule_queue(struct netdev_queue *txq) |
1da177e4 | 1780 | { |
79d16385 | 1781 | if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) |
37437bb2 | 1782 | __netif_schedule(txq->qdisc); |
86d804e1 DM |
1783 | } |
1784 | ||
fd2ea0a7 DM |
1785 | static inline void netif_tx_schedule_all(struct net_device *dev) |
1786 | { | |
1787 | unsigned int i; | |
1788 | ||
1789 | for (i = 0; i < dev->num_tx_queues; i++) | |
1790 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); | |
1791 | } | |
1792 | ||
d29f749e DJ |
1793 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) |
1794 | { | |
1795 | clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | |
1796 | } | |
1797 | ||
bea3348e SH |
1798 | /** |
1799 | * netif_start_queue - allow transmit | |
1800 | * @dev: network device | |
1801 | * | |
1802 | * Allow upper layers to call the device hard_start_xmit routine. | |
1803 | */ | |
1da177e4 LT |
1804 | static inline void netif_start_queue(struct net_device *dev) |
1805 | { | |
e8a0464c | 1806 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
1807 | } |
1808 | ||
fd2ea0a7 DM |
1809 | static inline void netif_tx_start_all_queues(struct net_device *dev) |
1810 | { | |
1811 | unsigned int i; | |
1812 | ||
1813 | for (i = 0; i < dev->num_tx_queues; i++) { | |
1814 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
1815 | netif_tx_start_queue(txq); | |
1816 | } | |
1817 | } | |
1818 | ||
79d16385 | 1819 | static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) |
1da177e4 LT |
1820 | { |
1821 | #ifdef CONFIG_NETPOLL_TRAP | |
5f286e11 | 1822 | if (netpoll_trap()) { |
7b3d3e4f | 1823 | netif_tx_start_queue(dev_queue); |
1da177e4 | 1824 | return; |
5f286e11 | 1825 | } |
1da177e4 | 1826 | #endif |
79d16385 | 1827 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) |
37437bb2 | 1828 | __netif_schedule(dev_queue->qdisc); |
79d16385 DM |
1829 | } |
1830 | ||
d29f749e DJ |
1831 | /** |
1832 | * netif_wake_queue - restart transmit | |
1833 | * @dev: network device | |
1834 | * | |
1835 | * Allow upper layers to call the device hard_start_xmit routine. | |
1836 | * Used for flow control when transmit resources are available. | |
1837 | */ | |
79d16385 DM |
1838 | static inline void netif_wake_queue(struct net_device *dev) |
1839 | { | |
e8a0464c | 1840 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
1841 | } |
1842 | ||
fd2ea0a7 DM |
1843 | static inline void netif_tx_wake_all_queues(struct net_device *dev) |
1844 | { | |
1845 | unsigned int i; | |
1846 | ||
1847 | for (i = 0; i < dev->num_tx_queues; i++) { | |
1848 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
1849 | netif_tx_wake_queue(txq); | |
1850 | } | |
1851 | } | |
1852 | ||
d29f749e DJ |
1853 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
1854 | { | |
18543a64 | 1855 | if (WARN_ON(!dev_queue)) { |
256ee435 | 1856 | pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); |
18543a64 GC |
1857 | return; |
1858 | } | |
d29f749e DJ |
1859 | set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); |
1860 | } | |
1861 | ||
bea3348e SH |
1862 | /** |
1863 | * netif_stop_queue - stop transmitted packets | |
1864 | * @dev: network device | |
1865 | * | |
1866 | * Stop upper layers calling the device hard_start_xmit routine. | |
1867 | * Used for flow control when transmit resources are unavailable. | |
1868 | */ | |
1da177e4 LT |
1869 | static inline void netif_stop_queue(struct net_device *dev) |
1870 | { | |
e8a0464c | 1871 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
1872 | } |
1873 | ||
fd2ea0a7 DM |
1874 | static inline void netif_tx_stop_all_queues(struct net_device *dev) |
1875 | { | |
1876 | unsigned int i; | |
1877 | ||
1878 | for (i = 0; i < dev->num_tx_queues; i++) { | |
1879 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
1880 | netif_tx_stop_queue(txq); | |
1881 | } | |
1882 | } | |
1883 | ||
d29f749e DJ |
1884 | static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
1885 | { | |
1886 | return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | |
1887 | } | |
1888 | ||
bea3348e SH |
1889 | /** |
1890 | * netif_queue_stopped - test if transmit queue is flowblocked | |
1891 | * @dev: network device | |
1892 | * | |
1893 | * Test if transmit queue on device is currently unable to send. | |
1894 | */ | |
1da177e4 LT |
1895 | static inline int netif_queue_stopped(const struct net_device *dev) |
1896 | { | |
e8a0464c | 1897 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
1898 | } |
1899 | ||
5a0d2268 | 1900 | static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue) |
c3f26a26 | 1901 | { |
5a0d2268 | 1902 | return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN; |
c3f26a26 DM |
1903 | } |
1904 | ||
bea3348e SH |
1905 | /** |
1906 | * netif_running - test if up | |
1907 | * @dev: network device | |
1908 | * | |
1909 | * Test if the device has been brought up. | |
1910 | */ | |
1da177e4 LT |
1911 | static inline int netif_running(const struct net_device *dev) |
1912 | { | |
1913 | return test_bit(__LINK_STATE_START, &dev->state); | |
1914 | } | |
1915 | ||
f25f4e44 PWJ |
1916 | /* |
1917 | * Routines to manage the subqueues on a device. We only need start | |
1918 | * stop, and a check if it's stopped. All other device management is | |
1919 | * done at the overall netdevice level. | |
1920 | * Also test the device if we're multiqueue. | |
1921 | */ | |
bea3348e SH |
1922 | |
1923 | /** | |
1924 | * netif_start_subqueue - allow sending packets on subqueue | |
1925 | * @dev: network device | |
1926 | * @queue_index: sub queue index | |
1927 | * | |
1928 | * Start individual transmit queue of a device with multiple transmit queues. | |
1929 | */ | |
f25f4e44 PWJ |
1930 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
1931 | { | |
fd2ea0a7 | 1932 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
1933 | |
1934 | netif_tx_start_queue(txq); | |
f25f4e44 PWJ |
1935 | } |
1936 | ||
bea3348e SH |
1937 | /** |
1938 | * netif_stop_subqueue - stop sending packets on subqueue | |
1939 | * @dev: network device | |
1940 | * @queue_index: sub queue index | |
1941 | * | |
1942 | * Stop individual transmit queue of a device with multiple transmit queues. | |
1943 | */ | |
f25f4e44 PWJ |
1944 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
1945 | { | |
fd2ea0a7 | 1946 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
f25f4e44 PWJ |
1947 | #ifdef CONFIG_NETPOLL_TRAP |
1948 | if (netpoll_trap()) | |
1949 | return; | |
1950 | #endif | |
7b3d3e4f | 1951 | netif_tx_stop_queue(txq); |
f25f4e44 PWJ |
1952 | } |
1953 | ||
bea3348e SH |
1954 | /** |
1955 | * netif_subqueue_stopped - test status of subqueue | |
1956 | * @dev: network device | |
1957 | * @queue_index: sub queue index | |
1958 | * | |
1959 | * Check individual transmit queue of a device with multiple transmit queues. | |
1960 | */ | |
668f895a | 1961 | static inline int __netif_subqueue_stopped(const struct net_device *dev, |
f25f4e44 PWJ |
1962 | u16 queue_index) |
1963 | { | |
fd2ea0a7 | 1964 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
1965 | |
1966 | return netif_tx_queue_stopped(txq); | |
f25f4e44 PWJ |
1967 | } |
1968 | ||
668f895a PE |
1969 | static inline int netif_subqueue_stopped(const struct net_device *dev, |
1970 | struct sk_buff *skb) | |
1971 | { | |
1972 | return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | |
1973 | } | |
bea3348e SH |
1974 | |
1975 | /** | |
1976 | * netif_wake_subqueue - allow sending packets on subqueue | |
1977 | * @dev: network device | |
1978 | * @queue_index: sub queue index | |
1979 | * | |
1980 | * Resume individual transmit queue of a device with multiple transmit queues. | |
1981 | */ | |
f25f4e44 PWJ |
1982 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) |
1983 | { | |
fd2ea0a7 | 1984 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
f25f4e44 PWJ |
1985 | #ifdef CONFIG_NETPOLL_TRAP |
1986 | if (netpoll_trap()) | |
1987 | return; | |
1988 | #endif | |
fd2ea0a7 | 1989 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) |
37437bb2 | 1990 | __netif_schedule(txq->qdisc); |
f25f4e44 PWJ |
1991 | } |
1992 | ||
a3d22a68 VZ |
1993 | /* |
1994 | * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used | |
1995 | * as a distribution range limit for the returned value. | |
1996 | */ | |
1997 | static inline u16 skb_tx_hash(const struct net_device *dev, | |
1998 | const struct sk_buff *skb) | |
1999 | { | |
2000 | return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); | |
2001 | } | |
2002 | ||
bea3348e SH |
2003 | /** |
2004 | * netif_is_multiqueue - test if device has multiple transmit queues | |
2005 | * @dev: network device | |
2006 | * | |
2007 | * Check if device has multiple transmit queues | |
bea3348e | 2008 | */ |
f25f4e44 PWJ |
2009 | static inline int netif_is_multiqueue(const struct net_device *dev) |
2010 | { | |
a02cec21 | 2011 | return dev->num_tx_queues > 1; |
f25f4e44 | 2012 | } |
1da177e4 | 2013 | |
e6484930 TH |
2014 | extern int netif_set_real_num_tx_queues(struct net_device *dev, |
2015 | unsigned int txq); | |
f0796d5c | 2016 | |
62fe0b40 BH |
2017 | #ifdef CONFIG_RPS |
2018 | extern int netif_set_real_num_rx_queues(struct net_device *dev, | |
2019 | unsigned int rxq); | |
2020 | #else | |
2021 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | |
2022 | unsigned int rxq) | |
2023 | { | |
2024 | return 0; | |
2025 | } | |
2026 | #endif | |
2027 | ||
3171d026 BH |
2028 | static inline int netif_copy_real_num_queues(struct net_device *to_dev, |
2029 | const struct net_device *from_dev) | |
2030 | { | |
2031 | netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues); | |
2032 | #ifdef CONFIG_RPS | |
2033 | return netif_set_real_num_rx_queues(to_dev, | |
2034 | from_dev->real_num_rx_queues); | |
2035 | #else | |
2036 | return 0; | |
2037 | #endif | |
2038 | } | |
2039 | ||
1da177e4 | 2040 | /* Use this variant when it is known for sure that it |
0ef47309 ML |
2041 | * is executing from hardware interrupt context or with hardware interrupts |
2042 | * disabled. | |
1da177e4 | 2043 | */ |
bea3348e | 2044 | extern void dev_kfree_skb_irq(struct sk_buff *skb); |
1da177e4 LT |
2045 | |
2046 | /* Use this variant in places where it could be invoked | |
0ef47309 ML |
2047 | * from either hardware interrupt or other context, with hardware interrupts |
2048 | * either disabled or enabled. | |
1da177e4 | 2049 | */ |
56079431 | 2050 | extern void dev_kfree_skb_any(struct sk_buff *skb); |
1da177e4 | 2051 | |
1da177e4 LT |
2052 | extern int netif_rx(struct sk_buff *skb); |
2053 | extern int netif_rx_ni(struct sk_buff *skb); | |
1da177e4 | 2054 | extern int netif_receive_skb(struct sk_buff *skb); |
5b252f0c | 2055 | extern gro_result_t dev_gro_receive(struct napi_struct *napi, |
96e93eab | 2056 | struct sk_buff *skb); |
c7c4b3b6 BH |
2057 | extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); |
2058 | extern gro_result_t napi_gro_receive(struct napi_struct *napi, | |
d565b0a1 | 2059 | struct sk_buff *skb); |
86cac58b | 2060 | extern void napi_gro_flush(struct napi_struct *napi); |
76620aaf | 2061 | extern struct sk_buff * napi_get_frags(struct napi_struct *napi); |
c7c4b3b6 | 2062 | extern gro_result_t napi_frags_finish(struct napi_struct *napi, |
5b252f0c BH |
2063 | struct sk_buff *skb, |
2064 | gro_result_t ret); | |
76620aaf | 2065 | extern struct sk_buff * napi_frags_skb(struct napi_struct *napi); |
c7c4b3b6 | 2066 | extern gro_result_t napi_gro_frags(struct napi_struct *napi); |
76620aaf HX |
2067 | |
2068 | static inline void napi_free_frags(struct napi_struct *napi) | |
2069 | { | |
2070 | kfree_skb(napi->skb); | |
2071 | napi->skb = NULL; | |
2072 | } | |
2073 | ||
ab95bfe0 | 2074 | extern int netdev_rx_handler_register(struct net_device *dev, |
93e2c32b JP |
2075 | rx_handler_func_t *rx_handler, |
2076 | void *rx_handler_data); | |
ab95bfe0 JP |
2077 | extern void netdev_rx_handler_unregister(struct net_device *dev); |
2078 | ||
c2373ee9 | 2079 | extern int dev_valid_name(const char *name); |
881d966b EB |
2080 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); |
2081 | extern int dev_ethtool(struct net *net, struct ifreq *); | |
1da177e4 | 2082 | extern unsigned dev_get_flags(const struct net_device *); |
bd380811 | 2083 | extern int __dev_change_flags(struct net_device *, unsigned int flags); |
1da177e4 | 2084 | extern int dev_change_flags(struct net_device *, unsigned); |
bd380811 | 2085 | extern void __dev_notify_flags(struct net_device *, unsigned int old_flags); |
cf04a4c7 | 2086 | extern int dev_change_name(struct net_device *, const char *); |
0b815a1a | 2087 | extern int dev_set_alias(struct net_device *, const char *, size_t); |
ce286d32 EB |
2088 | extern int dev_change_net_namespace(struct net_device *, |
2089 | struct net *, const char *); | |
1da177e4 | 2090 | extern int dev_set_mtu(struct net_device *, int); |
cbda10fa | 2091 | extern void dev_set_group(struct net_device *, int); |
1da177e4 LT |
2092 | extern int dev_set_mac_address(struct net_device *, |
2093 | struct sockaddr *); | |
f6a78bfc | 2094 | extern int dev_hard_start_xmit(struct sk_buff *skb, |
fd2ea0a7 DM |
2095 | struct net_device *dev, |
2096 | struct netdev_queue *txq); | |
44540960 AB |
2097 | extern int dev_forward_skb(struct net_device *dev, |
2098 | struct sk_buff *skb); | |
1da177e4 | 2099 | |
20380731 | 2100 | extern int netdev_budget; |
1da177e4 LT |
2101 | |
2102 | /* Called by rtnetlink.c:rtnl_unlock() */ | |
2103 | extern void netdev_run_todo(void); | |
2104 | ||
bea3348e SH |
2105 | /** |
2106 | * dev_put - release reference to device | |
2107 | * @dev: network device | |
2108 | * | |
9ef4429b | 2109 | * Release reference to device to allow it to be freed. |
bea3348e | 2110 | */ |
1da177e4 LT |
2111 | static inline void dev_put(struct net_device *dev) |
2112 | { | |
29b4433d | 2113 | irqsafe_cpu_dec(*dev->pcpu_refcnt); |
1da177e4 LT |
2114 | } |
2115 | ||
bea3348e SH |
2116 | /** |
2117 | * dev_hold - get reference to device | |
2118 | * @dev: network device | |
2119 | * | |
9ef4429b | 2120 | * Hold reference to device to keep it from being freed. |
bea3348e | 2121 | */ |
15333061 SH |
2122 | static inline void dev_hold(struct net_device *dev) |
2123 | { | |
29b4433d | 2124 | irqsafe_cpu_inc(*dev->pcpu_refcnt); |
15333061 | 2125 | } |
1da177e4 LT |
2126 | |
2127 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | |
2128 | * and _off may be called from IRQ context, but it is caller | |
2129 | * who is responsible for serialization of these calls. | |
b00055aa SR |
2130 | * |
2131 | * The name carrier is inappropriate, these functions should really be | |
2132 | * called netif_lowerlayer_*() because they represent the state of any | |
2133 | * kind of lower layer not just hardware media. | |
1da177e4 LT |
2134 | */ |
2135 | ||
2136 | extern void linkwatch_fire_event(struct net_device *dev); | |
e014debe | 2137 | extern void linkwatch_forget_dev(struct net_device *dev); |
1da177e4 | 2138 | |
bea3348e SH |
2139 | /** |
2140 | * netif_carrier_ok - test if carrier present | |
2141 | * @dev: network device | |
2142 | * | |
2143 | * Check if carrier is present on device | |
2144 | */ | |
1da177e4 LT |
2145 | static inline int netif_carrier_ok(const struct net_device *dev) |
2146 | { | |
2147 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | |
2148 | } | |
2149 | ||
9d21493b ED |
2150 | extern unsigned long dev_trans_start(struct net_device *dev); |
2151 | ||
1da177e4 LT |
2152 | extern void __netdev_watchdog_up(struct net_device *dev); |
2153 | ||
0a242efc | 2154 | extern void netif_carrier_on(struct net_device *dev); |
1da177e4 | 2155 | |
0a242efc | 2156 | extern void netif_carrier_off(struct net_device *dev); |
1da177e4 | 2157 | |
06c4648d IC |
2158 | extern void netif_notify_peers(struct net_device *dev); |
2159 | ||
bea3348e SH |
2160 | /** |
2161 | * netif_dormant_on - mark device as dormant. | |
2162 | * @dev: network device | |
2163 | * | |
2164 | * Mark device as dormant (as per RFC2863). | |
2165 | * | |
2166 | * The dormant state indicates that the relevant interface is not | |
2167 | * actually in a condition to pass packets (i.e., it is not 'up') but is | |
2168 | * in a "pending" state, waiting for some external event. For "on- | |
2169 | * demand" interfaces, this new state identifies the situation where the | |
2170 | * interface is waiting for events to place it in the up state. | |
2171 | * | |
2172 | */ | |
b00055aa SR |
2173 | static inline void netif_dormant_on(struct net_device *dev) |
2174 | { | |
2175 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | |
2176 | linkwatch_fire_event(dev); | |
2177 | } | |
2178 | ||
bea3348e SH |
2179 | /** |
2180 | * netif_dormant_off - set device as not dormant. | |
2181 | * @dev: network device | |
2182 | * | |
2183 | * Device is not in dormant state. | |
2184 | */ | |
b00055aa SR |
2185 | static inline void netif_dormant_off(struct net_device *dev) |
2186 | { | |
2187 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | |
2188 | linkwatch_fire_event(dev); | |
2189 | } | |
2190 | ||
bea3348e SH |
2191 | /** |
2192 | * netif_dormant - test if carrier present | |
2193 | * @dev: network device | |
2194 | * | |
2195 | * Check if carrier is present on device | |
2196 | */ | |
b00055aa SR |
2197 | static inline int netif_dormant(const struct net_device *dev) |
2198 | { | |
2199 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | |
2200 | } | |
2201 | ||
2202 | ||
bea3348e SH |
2203 | /** |
2204 | * netif_oper_up - test if device is operational | |
2205 | * @dev: network device | |
2206 | * | |
2207 | * Check if carrier is operational | |
2208 | */ | |
d94d9fee ED |
2209 | static inline int netif_oper_up(const struct net_device *dev) |
2210 | { | |
b00055aa SR |
2211 | return (dev->operstate == IF_OPER_UP || |
2212 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | |
2213 | } | |
2214 | ||
bea3348e SH |
2215 | /** |
2216 | * netif_device_present - is device available or removed | |
2217 | * @dev: network device | |
2218 | * | |
2219 | * Check if device has not been removed from system. | |
2220 | */ | |
1da177e4 LT |
2221 | static inline int netif_device_present(struct net_device *dev) |
2222 | { | |
2223 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | |
2224 | } | |
2225 | ||
56079431 | 2226 | extern void netif_device_detach(struct net_device *dev); |
1da177e4 | 2227 | |
56079431 | 2228 | extern void netif_device_attach(struct net_device *dev); |
1da177e4 LT |
2229 | |
2230 | /* | |
2231 | * Network interface message level settings | |
2232 | */ | |
1da177e4 LT |
2233 | |
2234 | enum { | |
2235 | NETIF_MSG_DRV = 0x0001, | |
2236 | NETIF_MSG_PROBE = 0x0002, | |
2237 | NETIF_MSG_LINK = 0x0004, | |
2238 | NETIF_MSG_TIMER = 0x0008, | |
2239 | NETIF_MSG_IFDOWN = 0x0010, | |
2240 | NETIF_MSG_IFUP = 0x0020, | |
2241 | NETIF_MSG_RX_ERR = 0x0040, | |
2242 | NETIF_MSG_TX_ERR = 0x0080, | |
2243 | NETIF_MSG_TX_QUEUED = 0x0100, | |
2244 | NETIF_MSG_INTR = 0x0200, | |
2245 | NETIF_MSG_TX_DONE = 0x0400, | |
2246 | NETIF_MSG_RX_STATUS = 0x0800, | |
2247 | NETIF_MSG_PKTDATA = 0x1000, | |
2248 | NETIF_MSG_HW = 0x2000, | |
2249 | NETIF_MSG_WOL = 0x4000, | |
2250 | }; | |
2251 | ||
2252 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | |
2253 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | |
2254 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | |
2255 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | |
2256 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | |
2257 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | |
2258 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | |
2259 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | |
2260 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | |
2261 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | |
2262 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | |
2263 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | |
2264 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | |
2265 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | |
2266 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | |
2267 | ||
2268 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |
2269 | { | |
2270 | /* use default */ | |
2271 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | |
2272 | return default_msg_enable_bits; | |
2273 | if (debug_value == 0) /* no output */ | |
2274 | return 0; | |
2275 | /* set low N bits */ | |
2276 | return (1 << debug_value) - 1; | |
2277 | } | |
2278 | ||
c773e847 | 2279 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
932ff279 | 2280 | { |
c773e847 DM |
2281 | spin_lock(&txq->_xmit_lock); |
2282 | txq->xmit_lock_owner = cpu; | |
22dd7495 JHS |
2283 | } |
2284 | ||
fd2ea0a7 DM |
2285 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
2286 | { | |
2287 | spin_lock_bh(&txq->_xmit_lock); | |
2288 | txq->xmit_lock_owner = smp_processor_id(); | |
2289 | } | |
2290 | ||
c3f26a26 DM |
2291 | static inline int __netif_tx_trylock(struct netdev_queue *txq) |
2292 | { | |
2293 | int ok = spin_trylock(&txq->_xmit_lock); | |
2294 | if (likely(ok)) | |
2295 | txq->xmit_lock_owner = smp_processor_id(); | |
2296 | return ok; | |
2297 | } | |
2298 | ||
2299 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | |
2300 | { | |
2301 | txq->xmit_lock_owner = -1; | |
2302 | spin_unlock(&txq->_xmit_lock); | |
2303 | } | |
2304 | ||
2305 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | |
2306 | { | |
2307 | txq->xmit_lock_owner = -1; | |
2308 | spin_unlock_bh(&txq->_xmit_lock); | |
2309 | } | |
2310 | ||
08baf561 ED |
2311 | static inline void txq_trans_update(struct netdev_queue *txq) |
2312 | { | |
2313 | if (txq->xmit_lock_owner != -1) | |
2314 | txq->trans_start = jiffies; | |
2315 | } | |
2316 | ||
d29f749e DJ |
2317 | /** |
2318 | * netif_tx_lock - grab network device transmit lock | |
2319 | * @dev: network device | |
d29f749e DJ |
2320 | * |
2321 | * Get network device transmit lock | |
2322 | */ | |
22dd7495 JHS |
2323 | static inline void netif_tx_lock(struct net_device *dev) |
2324 | { | |
e8a0464c | 2325 | unsigned int i; |
c3f26a26 | 2326 | int cpu; |
c773e847 | 2327 | |
c3f26a26 DM |
2328 | spin_lock(&dev->tx_global_lock); |
2329 | cpu = smp_processor_id(); | |
e8a0464c DM |
2330 | for (i = 0; i < dev->num_tx_queues; i++) { |
2331 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
2332 | |
2333 | /* We are the only thread of execution doing a | |
2334 | * freeze, but we have to grab the _xmit_lock in | |
2335 | * order to synchronize with threads which are in | |
2336 | * the ->hard_start_xmit() handler and already | |
2337 | * checked the frozen bit. | |
2338 | */ | |
e8a0464c | 2339 | __netif_tx_lock(txq, cpu); |
c3f26a26 DM |
2340 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); |
2341 | __netif_tx_unlock(txq); | |
e8a0464c | 2342 | } |
932ff279 HX |
2343 | } |
2344 | ||
2345 | static inline void netif_tx_lock_bh(struct net_device *dev) | |
2346 | { | |
e8a0464c DM |
2347 | local_bh_disable(); |
2348 | netif_tx_lock(dev); | |
932ff279 HX |
2349 | } |
2350 | ||
932ff279 HX |
2351 | static inline void netif_tx_unlock(struct net_device *dev) |
2352 | { | |
e8a0464c DM |
2353 | unsigned int i; |
2354 | ||
2355 | for (i = 0; i < dev->num_tx_queues; i++) { | |
2356 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c773e847 | 2357 | |
c3f26a26 DM |
2358 | /* No need to grab the _xmit_lock here. If the |
2359 | * queue is not stopped for another reason, we | |
2360 | * force a schedule. | |
2361 | */ | |
2362 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | |
7b3d3e4f | 2363 | netif_schedule_queue(txq); |
c3f26a26 DM |
2364 | } |
2365 | spin_unlock(&dev->tx_global_lock); | |
932ff279 HX |
2366 | } |
2367 | ||
2368 | static inline void netif_tx_unlock_bh(struct net_device *dev) | |
2369 | { | |
e8a0464c DM |
2370 | netif_tx_unlock(dev); |
2371 | local_bh_enable(); | |
932ff279 HX |
2372 | } |
2373 | ||
c773e847 | 2374 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
22dd7495 | 2375 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 2376 | __netif_tx_lock(txq, cpu); \ |
22dd7495 JHS |
2377 | } \ |
2378 | } | |
2379 | ||
c773e847 | 2380 | #define HARD_TX_UNLOCK(dev, txq) { \ |
22dd7495 | 2381 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 2382 | __netif_tx_unlock(txq); \ |
22dd7495 JHS |
2383 | } \ |
2384 | } | |
2385 | ||
1da177e4 LT |
2386 | static inline void netif_tx_disable(struct net_device *dev) |
2387 | { | |
fd2ea0a7 | 2388 | unsigned int i; |
c3f26a26 | 2389 | int cpu; |
fd2ea0a7 | 2390 | |
c3f26a26 DM |
2391 | local_bh_disable(); |
2392 | cpu = smp_processor_id(); | |
fd2ea0a7 DM |
2393 | for (i = 0; i < dev->num_tx_queues; i++) { |
2394 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
2395 | |
2396 | __netif_tx_lock(txq, cpu); | |
fd2ea0a7 | 2397 | netif_tx_stop_queue(txq); |
c3f26a26 | 2398 | __netif_tx_unlock(txq); |
fd2ea0a7 | 2399 | } |
c3f26a26 | 2400 | local_bh_enable(); |
1da177e4 LT |
2401 | } |
2402 | ||
e308a5d8 DM |
2403 | static inline void netif_addr_lock(struct net_device *dev) |
2404 | { | |
2405 | spin_lock(&dev->addr_list_lock); | |
2406 | } | |
2407 | ||
2408 | static inline void netif_addr_lock_bh(struct net_device *dev) | |
2409 | { | |
2410 | spin_lock_bh(&dev->addr_list_lock); | |
2411 | } | |
2412 | ||
2413 | static inline void netif_addr_unlock(struct net_device *dev) | |
2414 | { | |
2415 | spin_unlock(&dev->addr_list_lock); | |
2416 | } | |
2417 | ||
2418 | static inline void netif_addr_unlock_bh(struct net_device *dev) | |
2419 | { | |
2420 | spin_unlock_bh(&dev->addr_list_lock); | |
2421 | } | |
2422 | ||
f001fde5 | 2423 | /* |
31278e71 | 2424 | * dev_addrs walker. Should be used only for read access. Call with |
f001fde5 JP |
2425 | * rcu_read_lock held. |
2426 | */ | |
2427 | #define for_each_dev_addr(dev, ha) \ | |
31278e71 | 2428 | list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) |
f001fde5 | 2429 | |
1da177e4 LT |
2430 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ |
2431 | ||
2432 | extern void ether_setup(struct net_device *dev); | |
2433 | ||
2434 | /* Support for loadable net-drivers */ | |
36909ea4 | 2435 | extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
f25f4e44 | 2436 | void (*setup)(struct net_device *), |
36909ea4 | 2437 | unsigned int txqs, unsigned int rxqs); |
f25f4e44 | 2438 | #define alloc_netdev(sizeof_priv, name, setup) \ |
36909ea4 TH |
2439 | alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) |
2440 | ||
2441 | #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ | |
2442 | alloc_netdev_mqs(sizeof_priv, name, setup, count, count) | |
2443 | ||
1da177e4 LT |
2444 | extern int register_netdev(struct net_device *dev); |
2445 | extern void unregister_netdev(struct net_device *dev); | |
f001fde5 | 2446 | |
22bedad3 JP |
2447 | /* General hardware address lists handling functions */ |
2448 | extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | |
2449 | struct netdev_hw_addr_list *from_list, | |
2450 | int addr_len, unsigned char addr_type); | |
2451 | extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | |
2452 | struct netdev_hw_addr_list *from_list, | |
2453 | int addr_len, unsigned char addr_type); | |
2454 | extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | |
2455 | struct netdev_hw_addr_list *from_list, | |
2456 | int addr_len); | |
2457 | extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | |
2458 | struct netdev_hw_addr_list *from_list, | |
2459 | int addr_len); | |
2460 | extern void __hw_addr_flush(struct netdev_hw_addr_list *list); | |
2461 | extern void __hw_addr_init(struct netdev_hw_addr_list *list); | |
2462 | ||
f001fde5 JP |
2463 | /* Functions used for device addresses handling */ |
2464 | extern int dev_addr_add(struct net_device *dev, unsigned char *addr, | |
2465 | unsigned char addr_type); | |
2466 | extern int dev_addr_del(struct net_device *dev, unsigned char *addr, | |
2467 | unsigned char addr_type); | |
2468 | extern int dev_addr_add_multiple(struct net_device *to_dev, | |
2469 | struct net_device *from_dev, | |
2470 | unsigned char addr_type); | |
2471 | extern int dev_addr_del_multiple(struct net_device *to_dev, | |
2472 | struct net_device *from_dev, | |
2473 | unsigned char addr_type); | |
a748ee24 JP |
2474 | extern void dev_addr_flush(struct net_device *dev); |
2475 | extern int dev_addr_init(struct net_device *dev); | |
2476 | ||
2477 | /* Functions used for unicast addresses handling */ | |
2478 | extern int dev_uc_add(struct net_device *dev, unsigned char *addr); | |
2479 | extern int dev_uc_del(struct net_device *dev, unsigned char *addr); | |
2480 | extern int dev_uc_sync(struct net_device *to, struct net_device *from); | |
2481 | extern void dev_uc_unsync(struct net_device *to, struct net_device *from); | |
2482 | extern void dev_uc_flush(struct net_device *dev); | |
2483 | extern void dev_uc_init(struct net_device *dev); | |
f001fde5 | 2484 | |
22bedad3 JP |
2485 | /* Functions used for multicast addresses handling */ |
2486 | extern int dev_mc_add(struct net_device *dev, unsigned char *addr); | |
2487 | extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr); | |
2488 | extern int dev_mc_del(struct net_device *dev, unsigned char *addr); | |
2489 | extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr); | |
2490 | extern int dev_mc_sync(struct net_device *to, struct net_device *from); | |
2491 | extern void dev_mc_unsync(struct net_device *to, struct net_device *from); | |
2492 | extern void dev_mc_flush(struct net_device *dev); | |
2493 | extern void dev_mc_init(struct net_device *dev); | |
f001fde5 | 2494 | |
4417da66 PM |
2495 | /* Functions used for secondary unicast and multicast support */ |
2496 | extern void dev_set_rx_mode(struct net_device *dev); | |
2497 | extern void __dev_set_rx_mode(struct net_device *dev); | |
dad9b335 WC |
2498 | extern int dev_set_promiscuity(struct net_device *dev, int inc); |
2499 | extern int dev_set_allmulti(struct net_device *dev, int inc); | |
1da177e4 | 2500 | extern void netdev_state_change(struct net_device *dev); |
3ca5b404 | 2501 | extern int netdev_bonding_change(struct net_device *dev, |
75c78500 | 2502 | unsigned long event); |
d8a33ac4 | 2503 | extern void netdev_features_change(struct net_device *dev); |
1da177e4 | 2504 | /* Load a device via the kmod */ |
881d966b | 2505 | extern void dev_load(struct net *net, const char *name); |
1da177e4 | 2506 | extern void dev_mcast_init(void); |
d7753516 BH |
2507 | extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, |
2508 | struct rtnl_link_stats64 *storage); | |
eeda3fd6 | 2509 | |
1da177e4 | 2510 | extern int netdev_max_backlog; |
3b098e2d | 2511 | extern int netdev_tstamp_prequeue; |
1da177e4 | 2512 | extern int weight_p; |
0a14842f | 2513 | extern int bpf_jit_enable; |
1da177e4 | 2514 | extern int netdev_set_master(struct net_device *dev, struct net_device *master); |
1765a575 JP |
2515 | extern int netdev_set_bond_master(struct net_device *dev, |
2516 | struct net_device *master); | |
84fa7933 | 2517 | extern int skb_checksum_help(struct sk_buff *skb); |
04ed3e74 | 2518 | extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features); |
fb286bb2 HX |
2519 | #ifdef CONFIG_BUG |
2520 | extern void netdev_rx_csum_fault(struct net_device *dev); | |
2521 | #else | |
2522 | static inline void netdev_rx_csum_fault(struct net_device *dev) | |
2523 | { | |
2524 | } | |
2525 | #endif | |
1da177e4 LT |
2526 | /* rx skb timestamps */ |
2527 | extern void net_enable_timestamp(void); | |
2528 | extern void net_disable_timestamp(void); | |
2529 | ||
20380731 ACM |
2530 | #ifdef CONFIG_PROC_FS |
2531 | extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); | |
2532 | extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); | |
2533 | extern void dev_seq_stop(struct seq_file *seq, void *v); | |
2534 | #endif | |
2535 | ||
b8a9787e JV |
2536 | extern int netdev_class_create_file(struct class_attribute *class_attr); |
2537 | extern void netdev_class_remove_file(struct class_attribute *class_attr); | |
2538 | ||
04600794 JB |
2539 | extern struct kobj_ns_type_operations net_ns_type_operations; |
2540 | ||
3019de12 | 2541 | extern const char *netdev_drivername(const struct net_device *dev); |
6579e57b | 2542 | |
20380731 ACM |
2543 | extern void linkwatch_run_queue(void); |
2544 | ||
5455c699 MM |
2545 | static inline u32 netdev_get_wanted_features(struct net_device *dev) |
2546 | { | |
2547 | return (dev->features & ~dev->hw_features) | dev->wanted_features; | |
2548 | } | |
04ed3e74 | 2549 | u32 netdev_increment_features(u32 all, u32 one, u32 mask); |
acd1130e | 2550 | u32 netdev_fix_features(struct net_device *dev, u32 features); |
6cb6a27c | 2551 | int __netdev_update_features(struct net_device *dev); |
5455c699 | 2552 | void netdev_update_features(struct net_device *dev); |
afe12cc8 | 2553 | void netdev_change_features(struct net_device *dev); |
7f353bf2 | 2554 | |
fc4a7489 PM |
2555 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
2556 | struct net_device *dev); | |
2557 | ||
04ed3e74 | 2558 | u32 netif_skb_features(struct sk_buff *skb); |
58e998c6 | 2559 | |
04ed3e74 | 2560 | static inline int net_gso_ok(u32 features, int gso_type) |
576a30eb | 2561 | { |
bcd76111 | 2562 | int feature = gso_type << NETIF_F_GSO_SHIFT; |
d6b4991a | 2563 | return (features & feature) == feature; |
576a30eb HX |
2564 | } |
2565 | ||
04ed3e74 | 2566 | static inline int skb_gso_ok(struct sk_buff *skb, u32 features) |
bcd76111 | 2567 | { |
278b2513 | 2568 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
21dc3301 | 2569 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
bcd76111 HX |
2570 | } |
2571 | ||
fc741216 | 2572 | static inline int netif_needs_gso(struct sk_buff *skb, int features) |
7967168c | 2573 | { |
fc741216 JG |
2574 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || |
2575 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); | |
7967168c HX |
2576 | } |
2577 | ||
82cc1a7a PWJ |
2578 | static inline void netif_set_gso_max_size(struct net_device *dev, |
2579 | unsigned int size) | |
2580 | { | |
2581 | dev->gso_max_size = size; | |
2582 | } | |
2583 | ||
1765a575 JP |
2584 | static inline int netif_is_bond_slave(struct net_device *dev) |
2585 | { | |
2586 | return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; | |
2587 | } | |
2588 | ||
505d4f73 | 2589 | extern struct pernet_operations __net_initdata loopback_net_ops; |
b1b67dd4 | 2590 | |
8ae6daca DD |
2591 | int dev_ethtool_get_settings(struct net_device *dev, |
2592 | struct ethtool_cmd *cmd); | |
b1b67dd4 PM |
2593 | |
2594 | static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev) | |
2595 | { | |
4dd5ffe4 MM |
2596 | if (dev->features & NETIF_F_RXCSUM) |
2597 | return 1; | |
b1b67dd4 PM |
2598 | if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum) |
2599 | return 0; | |
2600 | return dev->ethtool_ops->get_rx_csum(dev); | |
2601 | } | |
2602 | ||
2603 | static inline u32 dev_ethtool_get_flags(struct net_device *dev) | |
2604 | { | |
2605 | if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags) | |
2606 | return 0; | |
2607 | return dev->ethtool_ops->get_flags(dev); | |
2608 | } | |
571ba423 JP |
2609 | |
2610 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ | |
2611 | ||
2612 | /* netdev_printk helpers, similar to dev_printk */ | |
2613 | ||
2614 | static inline const char *netdev_name(const struct net_device *dev) | |
2615 | { | |
2616 | if (dev->reg_state != NETREG_REGISTERED) | |
2617 | return "(unregistered net_device)"; | |
2618 | return dev->name; | |
2619 | } | |
2620 | ||
256df2f3 JP |
2621 | extern int netdev_printk(const char *level, const struct net_device *dev, |
2622 | const char *format, ...) | |
2623 | __attribute__ ((format (printf, 3, 4))); | |
2624 | extern int netdev_emerg(const struct net_device *dev, const char *format, ...) | |
2625 | __attribute__ ((format (printf, 2, 3))); | |
2626 | extern int netdev_alert(const struct net_device *dev, const char *format, ...) | |
2627 | __attribute__ ((format (printf, 2, 3))); | |
2628 | extern int netdev_crit(const struct net_device *dev, const char *format, ...) | |
2629 | __attribute__ ((format (printf, 2, 3))); | |
2630 | extern int netdev_err(const struct net_device *dev, const char *format, ...) | |
2631 | __attribute__ ((format (printf, 2, 3))); | |
2632 | extern int netdev_warn(const struct net_device *dev, const char *format, ...) | |
2633 | __attribute__ ((format (printf, 2, 3))); | |
2634 | extern int netdev_notice(const struct net_device *dev, const char *format, ...) | |
2635 | __attribute__ ((format (printf, 2, 3))); | |
2636 | extern int netdev_info(const struct net_device *dev, const char *format, ...) | |
2637 | __attribute__ ((format (printf, 2, 3))); | |
571ba423 | 2638 | |
8909c9ad VK |
2639 | #define MODULE_ALIAS_NETDEV(device) \ |
2640 | MODULE_ALIAS("netdev-" device) | |
2641 | ||
571ba423 JP |
2642 | #if defined(DEBUG) |
2643 | #define netdev_dbg(__dev, format, args...) \ | |
2644 | netdev_printk(KERN_DEBUG, __dev, format, ##args) | |
2645 | #elif defined(CONFIG_DYNAMIC_DEBUG) | |
2646 | #define netdev_dbg(__dev, format, args...) \ | |
2647 | do { \ | |
2648 | dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ | |
2649 | netdev_name(__dev), ##args); \ | |
2650 | } while (0) | |
2651 | #else | |
2652 | #define netdev_dbg(__dev, format, args...) \ | |
2653 | ({ \ | |
2654 | if (0) \ | |
2655 | netdev_printk(KERN_DEBUG, __dev, format, ##args); \ | |
2656 | 0; \ | |
2657 | }) | |
2658 | #endif | |
2659 | ||
2660 | #if defined(VERBOSE_DEBUG) | |
2661 | #define netdev_vdbg netdev_dbg | |
2662 | #else | |
2663 | ||
2664 | #define netdev_vdbg(dev, format, args...) \ | |
2665 | ({ \ | |
2666 | if (0) \ | |
2667 | netdev_printk(KERN_DEBUG, dev, format, ##args); \ | |
2668 | 0; \ | |
2669 | }) | |
2670 | #endif | |
2671 | ||
2672 | /* | |
2673 | * netdev_WARN() acts like dev_printk(), but with the key difference | |
2674 | * of using a WARN/WARN_ON to get the message out, including the | |
2675 | * file/line information and a backtrace. | |
2676 | */ | |
2677 | #define netdev_WARN(dev, format, args...) \ | |
2678 | WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args); | |
2679 | ||
b3d95c5c JP |
2680 | /* netif printk helpers, similar to netdev_printk */ |
2681 | ||
2682 | #define netif_printk(priv, type, level, dev, fmt, args...) \ | |
2683 | do { \ | |
2684 | if (netif_msg_##type(priv)) \ | |
2685 | netdev_printk(level, (dev), fmt, ##args); \ | |
2686 | } while (0) | |
2687 | ||
f45f4321 JP |
2688 | #define netif_level(level, priv, type, dev, fmt, args...) \ |
2689 | do { \ | |
2690 | if (netif_msg_##type(priv)) \ | |
2691 | netdev_##level(dev, fmt, ##args); \ | |
2692 | } while (0) | |
2693 | ||
b3d95c5c | 2694 | #define netif_emerg(priv, type, dev, fmt, args...) \ |
f45f4321 | 2695 | netif_level(emerg, priv, type, dev, fmt, ##args) |
b3d95c5c | 2696 | #define netif_alert(priv, type, dev, fmt, args...) \ |
f45f4321 | 2697 | netif_level(alert, priv, type, dev, fmt, ##args) |
b3d95c5c | 2698 | #define netif_crit(priv, type, dev, fmt, args...) \ |
f45f4321 | 2699 | netif_level(crit, priv, type, dev, fmt, ##args) |
b3d95c5c | 2700 | #define netif_err(priv, type, dev, fmt, args...) \ |
f45f4321 | 2701 | netif_level(err, priv, type, dev, fmt, ##args) |
b3d95c5c | 2702 | #define netif_warn(priv, type, dev, fmt, args...) \ |
f45f4321 | 2703 | netif_level(warn, priv, type, dev, fmt, ##args) |
b3d95c5c | 2704 | #define netif_notice(priv, type, dev, fmt, args...) \ |
f45f4321 | 2705 | netif_level(notice, priv, type, dev, fmt, ##args) |
b3d95c5c | 2706 | #define netif_info(priv, type, dev, fmt, args...) \ |
f45f4321 | 2707 | netif_level(info, priv, type, dev, fmt, ##args) |
b3d95c5c JP |
2708 | |
2709 | #if defined(DEBUG) | |
2710 | #define netif_dbg(priv, type, dev, format, args...) \ | |
2711 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) | |
2712 | #elif defined(CONFIG_DYNAMIC_DEBUG) | |
2713 | #define netif_dbg(priv, type, netdev, format, args...) \ | |
2714 | do { \ | |
2715 | if (netif_msg_##type(priv)) \ | |
2716 | dynamic_dev_dbg((netdev)->dev.parent, \ | |
2717 | "%s: " format, \ | |
2718 | netdev_name(netdev), ##args); \ | |
2719 | } while (0) | |
2720 | #else | |
2721 | #define netif_dbg(priv, type, dev, format, args...) \ | |
2722 | ({ \ | |
2723 | if (0) \ | |
2724 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | |
2725 | 0; \ | |
2726 | }) | |
2727 | #endif | |
2728 | ||
2729 | #if defined(VERBOSE_DEBUG) | |
bcfcc450 | 2730 | #define netif_vdbg netif_dbg |
b3d95c5c JP |
2731 | #else |
2732 | #define netif_vdbg(priv, type, dev, format, args...) \ | |
2733 | ({ \ | |
2734 | if (0) \ | |
a4ed89cb | 2735 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ |
b3d95c5c JP |
2736 | 0; \ |
2737 | }) | |
2738 | #endif | |
571ba423 | 2739 | |
1da177e4 LT |
2740 | #endif /* __KERNEL__ */ |
2741 | ||
385a154c | 2742 | #endif /* _LINUX_NETDEVICE_H */ |