]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * Definitions for the Interfaces handler. | |
7 | * | |
8 | * Version: @(#)dev.h 1.0.10 08/12/93 | |
9 | * | |
02c30a84 | 10 | * Authors: Ross Biro |
1da177e4 LT |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> | |
13 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> | |
113aa838 | 14 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> |
1da177e4 LT |
15 | * Bjorn Ekwall. <bj0rn@blox.se> |
16 | * Pekka Riikonen <priikone@poseidon.pspt.fi> | |
17 | * | |
18 | * This program is free software; you can redistribute it and/or | |
19 | * modify it under the terms of the GNU General Public License | |
20 | * as published by the Free Software Foundation; either version | |
21 | * 2 of the License, or (at your option) any later version. | |
22 | * | |
23 | * Moved to /usr/include/linux for NET3 | |
24 | */ | |
25 | #ifndef _LINUX_NETDEVICE_H | |
26 | #define _LINUX_NETDEVICE_H | |
27 | ||
d7fe0f24 | 28 | #include <linux/timer.h> |
187f1882 | 29 | #include <linux/bug.h> |
bea3348e | 30 | #include <linux/delay.h> |
60063497 | 31 | #include <linux/atomic.h> |
53511453 | 32 | #include <linux/prefetch.h> |
1da177e4 LT |
33 | #include <asm/cache.h> |
34 | #include <asm/byteorder.h> | |
35 | ||
1da177e4 | 36 | #include <linux/percpu.h> |
4d5b78c0 | 37 | #include <linux/rculist.h> |
db217334 | 38 | #include <linux/dmaengine.h> |
bea3348e | 39 | #include <linux/workqueue.h> |
114cf580 | 40 | #include <linux/dynamic_queue_limits.h> |
1da177e4 | 41 | |
b1b67dd4 | 42 | #include <linux/ethtool.h> |
a050c33f | 43 | #include <net/net_namespace.h> |
cf85d08f | 44 | #include <net/dsa.h> |
7a6b6f51 | 45 | #ifdef CONFIG_DCB |
2f90b865 AD |
46 | #include <net/dcbnl.h> |
47 | #endif | |
5bc1421e | 48 | #include <net/netprio_cgroup.h> |
a050c33f | 49 | |
a59e2ecb | 50 | #include <linux/netdev_features.h> |
77162022 | 51 | #include <linux/neighbour.h> |
607ca46e | 52 | #include <uapi/linux/netdevice.h> |
61bd3857 | 53 | #include <uapi/linux/if_bonding.h> |
a59e2ecb | 54 | |
115c1d6e | 55 | struct netpoll_info; |
313162d0 | 56 | struct device; |
c1f19b51 | 57 | struct phy_device; |
704232c2 JB |
58 | /* 802.11 specific */ |
59 | struct wireless_dev; | |
98a18b6f AA |
60 | /* 802.15.4 specific */ |
61 | struct wpan_dev; | |
03c57747 | 62 | struct mpls_dev; |
1da177e4 | 63 | |
f629d208 JP |
64 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
65 | const struct ethtool_ops *ops); | |
d07d7507 | 66 | |
9a1654ba JP |
67 | /* Backlog congestion levels */ |
68 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | |
69 | #define NET_RX_DROP 1 /* packet dropped */ | |
70 | ||
572a9d7b PM |
71 | /* |
72 | * Transmit return codes: transmit return codes originate from three different | |
73 | * namespaces: | |
74 | * | |
75 | * - qdisc return codes | |
76 | * - driver transmit return codes | |
77 | * - errno values | |
78 | * | |
79 | * Drivers are allowed to return any one of those in their hard_start_xmit() | |
80 | * function. Real network devices commonly used with qdiscs should only return | |
81 | * the driver transmit return codes though - when qdiscs are used, the actual | |
82 | * transmission happens asynchronously, so the value is not propagated to | |
83 | * higher layers. Virtual network devices transmit synchronously, in this case | |
84 | * the driver transmit return codes are consumed by dev_queue_xmit(), all | |
85 | * others are propagated to higher layers. | |
86 | */ | |
87 | ||
88 | /* qdisc ->enqueue() return codes. */ | |
89 | #define NET_XMIT_SUCCESS 0x00 | |
9a1654ba JP |
90 | #define NET_XMIT_DROP 0x01 /* skb dropped */ |
91 | #define NET_XMIT_CN 0x02 /* congestion notification */ | |
92 | #define NET_XMIT_POLICED 0x03 /* skb is shot by police */ | |
93 | #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ | |
1da177e4 | 94 | |
b9df3cb8 GR |
95 | /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It |
96 | * indicates that the device will soon be dropping packets, or already drops | |
97 | * some packets of the same priority; prompting us to send less aggressively. */ | |
572a9d7b | 98 | #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) |
1da177e4 LT |
99 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) |
100 | ||
dc1f8bf6 | 101 | /* Driver transmit return codes */ |
9a1654ba | 102 | #define NETDEV_TX_MASK 0xf0 |
572a9d7b | 103 | |
dc1f8bf6 | 104 | enum netdev_tx { |
572a9d7b | 105 | __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ |
9a1654ba JP |
106 | NETDEV_TX_OK = 0x00, /* driver took care of packet */ |
107 | NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ | |
108 | NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ | |
dc1f8bf6 SH |
109 | }; |
110 | typedef enum netdev_tx netdev_tx_t; | |
111 | ||
9a1654ba JP |
112 | /* |
113 | * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; | |
114 | * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. | |
115 | */ | |
116 | static inline bool dev_xmit_complete(int rc) | |
117 | { | |
118 | /* | |
119 | * Positive cases with an skb consumed by a driver: | |
120 | * - successful transmission (rc == NETDEV_TX_OK) | |
121 | * - error while transmitting (rc < 0) | |
122 | * - error while queueing to a different device (rc & NET_XMIT_MASK) | |
123 | */ | |
124 | if (likely(rc < NET_XMIT_MASK)) | |
125 | return true; | |
126 | ||
127 | return false; | |
128 | } | |
129 | ||
1da177e4 LT |
130 | /* |
131 | * Compute the worst case header length according to the protocols | |
132 | * used. | |
133 | */ | |
fe2918b0 | 134 | |
c0eb4540 KS |
135 | #if defined(CONFIG_HYPERV_NET) |
136 | # define LL_MAX_HEADER 128 | |
137 | #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) | |
8388e3da DM |
138 | # if defined(CONFIG_MAC80211_MESH) |
139 | # define LL_MAX_HEADER 128 | |
140 | # else | |
141 | # define LL_MAX_HEADER 96 | |
142 | # endif | |
1da177e4 | 143 | #else |
8388e3da | 144 | # define LL_MAX_HEADER 32 |
1da177e4 LT |
145 | #endif |
146 | ||
d11ead75 BH |
147 | #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ |
148 | !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) | |
1da177e4 LT |
149 | #define MAX_HEADER LL_MAX_HEADER |
150 | #else | |
151 | #define MAX_HEADER (LL_MAX_HEADER + 48) | |
152 | #endif | |
153 | ||
154 | /* | |
be1f3c2c BH |
155 | * Old network device statistics. Fields are native words |
156 | * (unsigned long) so they can be read and written atomically. | |
1da177e4 | 157 | */ |
fe2918b0 | 158 | |
d94d9fee | 159 | struct net_device_stats { |
3cfde79c BH |
160 | unsigned long rx_packets; |
161 | unsigned long tx_packets; | |
162 | unsigned long rx_bytes; | |
163 | unsigned long tx_bytes; | |
164 | unsigned long rx_errors; | |
165 | unsigned long tx_errors; | |
166 | unsigned long rx_dropped; | |
167 | unsigned long tx_dropped; | |
168 | unsigned long multicast; | |
1da177e4 | 169 | unsigned long collisions; |
1da177e4 | 170 | unsigned long rx_length_errors; |
3cfde79c BH |
171 | unsigned long rx_over_errors; |
172 | unsigned long rx_crc_errors; | |
173 | unsigned long rx_frame_errors; | |
174 | unsigned long rx_fifo_errors; | |
175 | unsigned long rx_missed_errors; | |
1da177e4 LT |
176 | unsigned long tx_aborted_errors; |
177 | unsigned long tx_carrier_errors; | |
178 | unsigned long tx_fifo_errors; | |
179 | unsigned long tx_heartbeat_errors; | |
180 | unsigned long tx_window_errors; | |
1da177e4 LT |
181 | unsigned long rx_compressed; |
182 | unsigned long tx_compressed; | |
183 | }; | |
184 | ||
1da177e4 LT |
185 | |
186 | #include <linux/cache.h> | |
187 | #include <linux/skbuff.h> | |
188 | ||
adc9300e | 189 | #ifdef CONFIG_RPS |
c5905afb IM |
190 | #include <linux/static_key.h> |
191 | extern struct static_key rps_needed; | |
adc9300e ED |
192 | #endif |
193 | ||
1da177e4 LT |
194 | struct neighbour; |
195 | struct neigh_parms; | |
196 | struct sk_buff; | |
197 | ||
f001fde5 JP |
198 | struct netdev_hw_addr { |
199 | struct list_head list; | |
200 | unsigned char addr[MAX_ADDR_LEN]; | |
201 | unsigned char type; | |
ccffad25 JP |
202 | #define NETDEV_HW_ADDR_T_LAN 1 |
203 | #define NETDEV_HW_ADDR_T_SAN 2 | |
204 | #define NETDEV_HW_ADDR_T_SLAVE 3 | |
205 | #define NETDEV_HW_ADDR_T_UNICAST 4 | |
22bedad3 | 206 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
22bedad3 | 207 | bool global_use; |
4cd729b0 | 208 | int sync_cnt; |
8f8f103d | 209 | int refcount; |
4543fbef | 210 | int synced; |
f001fde5 JP |
211 | struct rcu_head rcu_head; |
212 | }; | |
213 | ||
31278e71 JP |
214 | struct netdev_hw_addr_list { |
215 | struct list_head list; | |
216 | int count; | |
217 | }; | |
218 | ||
22bedad3 JP |
219 | #define netdev_hw_addr_list_count(l) ((l)->count) |
220 | #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) | |
221 | #define netdev_hw_addr_list_for_each(ha, l) \ | |
222 | list_for_each_entry(ha, &(l)->list, list) | |
32e7bfc4 | 223 | |
22bedad3 JP |
224 | #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) |
225 | #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) | |
226 | #define netdev_for_each_uc_addr(ha, dev) \ | |
227 | netdev_hw_addr_list_for_each(ha, &(dev)->uc) | |
6683ece3 | 228 | |
22bedad3 JP |
229 | #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) |
230 | #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) | |
18e225f2 | 231 | #define netdev_for_each_mc_addr(ha, dev) \ |
22bedad3 | 232 | netdev_hw_addr_list_for_each(ha, &(dev)->mc) |
6683ece3 | 233 | |
d94d9fee | 234 | struct hh_cache { |
f6b72b62 | 235 | u16 hh_len; |
5c25f686 | 236 | u16 __pad; |
3644f0ce | 237 | seqlock_t hh_lock; |
1da177e4 LT |
238 | |
239 | /* cached hardware header; allow for machine alignment needs. */ | |
240 | #define HH_DATA_MOD 16 | |
241 | #define HH_DATA_OFF(__len) \ | |
5ba0eac6 | 242 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) |
1da177e4 LT |
243 | #define HH_DATA_ALIGN(__len) \ |
244 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) | |
245 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | |
246 | }; | |
247 | ||
248 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. | |
249 | * Alternative is: | |
250 | * dev->hard_header_len ? (dev->hard_header_len + | |
251 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | |
252 | * | |
253 | * We could use other alignment values, but we must maintain the | |
254 | * relationship HH alignment <= LL alignment. | |
255 | */ | |
256 | #define LL_RESERVED_SPACE(dev) \ | |
f5184d26 | 257 | ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 258 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
f5184d26 | 259 | ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 260 | |
3b04ddde SH |
261 | struct header_ops { |
262 | int (*create) (struct sk_buff *skb, struct net_device *dev, | |
263 | unsigned short type, const void *daddr, | |
95c96174 | 264 | const void *saddr, unsigned int len); |
3b04ddde | 265 | int (*parse)(const struct sk_buff *skb, unsigned char *haddr); |
e69dd336 | 266 | int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); |
3b04ddde SH |
267 | void (*cache_update)(struct hh_cache *hh, |
268 | const struct net_device *dev, | |
269 | const unsigned char *haddr); | |
270 | }; | |
271 | ||
1da177e4 LT |
272 | /* These flag bits are private to the generic network queueing |
273 | * layer, they may not be explicitly referenced by any other | |
274 | * code. | |
275 | */ | |
276 | ||
d94d9fee | 277 | enum netdev_state_t { |
1da177e4 LT |
278 | __LINK_STATE_START, |
279 | __LINK_STATE_PRESENT, | |
1da177e4 | 280 | __LINK_STATE_NOCARRIER, |
b00055aa SR |
281 | __LINK_STATE_LINKWATCH_PENDING, |
282 | __LINK_STATE_DORMANT, | |
1da177e4 LT |
283 | }; |
284 | ||
285 | ||
286 | /* | |
287 | * This structure holds at boot time configured netdevice settings. They | |
fe2918b0 | 288 | * are then used in the device probing. |
1da177e4 LT |
289 | */ |
290 | struct netdev_boot_setup { | |
291 | char name[IFNAMSIZ]; | |
292 | struct ifmap map; | |
293 | }; | |
294 | #define NETDEV_BOOT_SETUP_MAX 8 | |
295 | ||
f629d208 | 296 | int __init netdev_boot_setup(char *str); |
1da177e4 | 297 | |
bea3348e SH |
298 | /* |
299 | * Structure for NAPI scheduling similar to tasklet but with weighting | |
300 | */ | |
301 | struct napi_struct { | |
302 | /* The poll_list must only be managed by the entity which | |
303 | * changes the state of the NAPI_STATE_SCHED bit. This means | |
304 | * whoever atomically sets that bit can add this napi_struct | |
305 | * to the per-cpu poll_list, and whoever clears that bit | |
306 | * can remove from the list right before clearing the bit. | |
307 | */ | |
308 | struct list_head poll_list; | |
309 | ||
310 | unsigned long state; | |
311 | int weight; | |
404f7c9e | 312 | unsigned int gro_count; |
bea3348e SH |
313 | int (*poll)(struct napi_struct *, int); |
314 | #ifdef CONFIG_NETPOLL | |
315 | spinlock_t poll_lock; | |
316 | int poll_owner; | |
bea3348e | 317 | #endif |
5d38a079 | 318 | struct net_device *dev; |
d565b0a1 | 319 | struct sk_buff *gro_list; |
5d38a079 | 320 | struct sk_buff *skb; |
3b47d303 | 321 | struct hrtimer timer; |
404f7c9e | 322 | struct list_head dev_list; |
af12fa6e ET |
323 | struct hlist_node napi_hash_node; |
324 | unsigned int napi_id; | |
bea3348e SH |
325 | }; |
326 | ||
d94d9fee | 327 | enum { |
bea3348e | 328 | NAPI_STATE_SCHED, /* Poll is scheduled */ |
a0a46196 | 329 | NAPI_STATE_DISABLE, /* Disable pending */ |
7b363e44 | 330 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ |
d64b5e85 ED |
331 | NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ |
332 | NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ | |
bea3348e SH |
333 | }; |
334 | ||
5b252f0c | 335 | enum gro_result { |
d1c76af9 HX |
336 | GRO_MERGED, |
337 | GRO_MERGED_FREE, | |
338 | GRO_HELD, | |
339 | GRO_NORMAL, | |
340 | GRO_DROP, | |
341 | }; | |
5b252f0c | 342 | typedef enum gro_result gro_result_t; |
d1c76af9 | 343 | |
8a4eb573 JP |
344 | /* |
345 | * enum rx_handler_result - Possible return values for rx_handlers. | |
346 | * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it | |
347 | * further. | |
348 | * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in | |
349 | * case skb->dev was changed by rx_handler. | |
350 | * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. | |
351 | * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called. | |
352 | * | |
353 | * rx_handlers are functions called from inside __netif_receive_skb(), to do | |
354 | * special processing of the skb, prior to delivery to protocol handlers. | |
355 | * | |
356 | * Currently, a net_device can only have a single rx_handler registered. Trying | |
357 | * to register a second rx_handler will return -EBUSY. | |
358 | * | |
359 | * To register a rx_handler on a net_device, use netdev_rx_handler_register(). | |
360 | * To unregister a rx_handler on a net_device, use | |
361 | * netdev_rx_handler_unregister(). | |
362 | * | |
363 | * Upon return, rx_handler is expected to tell __netif_receive_skb() what to | |
364 | * do with the skb. | |
365 | * | |
366 | * If the rx_handler consumed to skb in some way, it should return | |
367 | * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for | |
368 | * the skb to be delivered in some other ways. | |
369 | * | |
370 | * If the rx_handler changed skb->dev, to divert the skb to another | |
371 | * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the | |
372 | * new device will be called if it exists. | |
373 | * | |
374 | * If the rx_handler consider the skb should be ignored, it should return | |
375 | * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that | |
d93cf068 | 376 | * are registered on exact device (ptype->dev == skb->dev). |
8a4eb573 JP |
377 | * |
378 | * If the rx_handler didn't changed skb->dev, but want the skb to be normally | |
379 | * delivered, it should return RX_HANDLER_PASS. | |
380 | * | |
381 | * A device without a registered rx_handler will behave as if rx_handler | |
382 | * returned RX_HANDLER_PASS. | |
383 | */ | |
384 | ||
385 | enum rx_handler_result { | |
386 | RX_HANDLER_CONSUMED, | |
387 | RX_HANDLER_ANOTHER, | |
388 | RX_HANDLER_EXACT, | |
389 | RX_HANDLER_PASS, | |
390 | }; | |
391 | typedef enum rx_handler_result rx_handler_result_t; | |
392 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); | |
ab95bfe0 | 393 | |
f629d208 | 394 | void __napi_schedule(struct napi_struct *n); |
bc9ad166 | 395 | void __napi_schedule_irqoff(struct napi_struct *n); |
bea3348e | 396 | |
4d29515f | 397 | static inline bool napi_disable_pending(struct napi_struct *n) |
a0a46196 DM |
398 | { |
399 | return test_bit(NAPI_STATE_DISABLE, &n->state); | |
400 | } | |
401 | ||
bea3348e SH |
402 | /** |
403 | * napi_schedule_prep - check if napi can be scheduled | |
404 | * @n: napi context | |
405 | * | |
406 | * Test if NAPI routine is already running, and if not mark | |
407 | * it as running. This is used as a condition variable | |
a0a46196 DM |
408 | * insure only one NAPI poll instance runs. We also make |
409 | * sure there is no pending NAPI disable. | |
bea3348e | 410 | */ |
4d29515f | 411 | static inline bool napi_schedule_prep(struct napi_struct *n) |
bea3348e | 412 | { |
a0a46196 DM |
413 | return !napi_disable_pending(n) && |
414 | !test_and_set_bit(NAPI_STATE_SCHED, &n->state); | |
bea3348e SH |
415 | } |
416 | ||
417 | /** | |
418 | * napi_schedule - schedule NAPI poll | |
419 | * @n: napi context | |
420 | * | |
421 | * Schedule NAPI poll routine to be called if it is not already | |
422 | * running. | |
423 | */ | |
424 | static inline void napi_schedule(struct napi_struct *n) | |
425 | { | |
426 | if (napi_schedule_prep(n)) | |
427 | __napi_schedule(n); | |
428 | } | |
429 | ||
bc9ad166 ED |
430 | /** |
431 | * napi_schedule_irqoff - schedule NAPI poll | |
432 | * @n: napi context | |
433 | * | |
434 | * Variant of napi_schedule(), assuming hard irqs are masked. | |
435 | */ | |
436 | static inline void napi_schedule_irqoff(struct napi_struct *n) | |
437 | { | |
438 | if (napi_schedule_prep(n)) | |
439 | __napi_schedule_irqoff(n); | |
440 | } | |
441 | ||
bfe13f54 | 442 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
4d29515f | 443 | static inline bool napi_reschedule(struct napi_struct *napi) |
bfe13f54 RD |
444 | { |
445 | if (napi_schedule_prep(napi)) { | |
446 | __napi_schedule(napi); | |
4d29515f | 447 | return true; |
bfe13f54 | 448 | } |
4d29515f | 449 | return false; |
bfe13f54 RD |
450 | } |
451 | ||
3b47d303 ED |
452 | void __napi_complete(struct napi_struct *n); |
453 | void napi_complete_done(struct napi_struct *n, int work_done); | |
bea3348e SH |
454 | /** |
455 | * napi_complete - NAPI processing complete | |
456 | * @n: napi context | |
457 | * | |
458 | * Mark NAPI processing as complete. | |
3b47d303 | 459 | * Consider using napi_complete_done() instead. |
bea3348e | 460 | */ |
3b47d303 ED |
461 | static inline void napi_complete(struct napi_struct *n) |
462 | { | |
463 | return napi_complete_done(n, 0); | |
464 | } | |
bea3348e | 465 | |
af12fa6e ET |
466 | /** |
467 | * napi_hash_add - add a NAPI to global hashtable | |
468 | * @napi: napi context | |
469 | * | |
470 | * generate a new napi_id and store a @napi under it in napi_hash | |
93d05d4a ED |
471 | * Used for busy polling (CONFIG_NET_RX_BUSY_POLL) |
472 | * Note: This is normally automatically done from netif_napi_add(), | |
473 | * so might disappear in a future linux version. | |
af12fa6e | 474 | */ |
f629d208 | 475 | void napi_hash_add(struct napi_struct *napi); |
af12fa6e ET |
476 | |
477 | /** | |
478 | * napi_hash_del - remove a NAPI from global table | |
479 | * @napi: napi context | |
480 | * | |
481 | * Warning: caller must observe rcu grace period | |
34cbe27e ED |
482 | * before freeing memory containing @napi, if |
483 | * this function returns true. | |
93d05d4a ED |
484 | * Note: core networking stack automatically calls it |
485 | * from netif_napi_del() | |
486 | * Drivers might want to call this helper to combine all | |
487 | * the needed rcu grace periods into a single one. | |
af12fa6e | 488 | */ |
34cbe27e | 489 | bool napi_hash_del(struct napi_struct *napi); |
af12fa6e | 490 | |
bea3348e SH |
491 | /** |
492 | * napi_disable - prevent NAPI from scheduling | |
493 | * @n: napi context | |
494 | * | |
495 | * Stop NAPI from being scheduled on this context. | |
496 | * Waits till any outstanding processing completes. | |
497 | */ | |
3b47d303 | 498 | void napi_disable(struct napi_struct *n); |
bea3348e SH |
499 | |
500 | /** | |
501 | * napi_enable - enable NAPI scheduling | |
502 | * @n: napi context | |
503 | * | |
504 | * Resume NAPI from being scheduled on this context. | |
505 | * Must be paired with napi_disable. | |
506 | */ | |
507 | static inline void napi_enable(struct napi_struct *n) | |
508 | { | |
509 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | |
4e857c58 | 510 | smp_mb__before_atomic(); |
bea3348e | 511 | clear_bit(NAPI_STATE_SCHED, &n->state); |
2d8bff12 | 512 | clear_bit(NAPI_STATE_NPSVC, &n->state); |
bea3348e SH |
513 | } |
514 | ||
c264c3de SH |
515 | /** |
516 | * napi_synchronize - wait until NAPI is not running | |
517 | * @n: napi context | |
518 | * | |
519 | * Wait until NAPI is done being scheduled on this context. | |
520 | * Waits till any outstanding processing completes but | |
521 | * does not disable future activations. | |
522 | */ | |
523 | static inline void napi_synchronize(const struct napi_struct *n) | |
524 | { | |
facc432f AB |
525 | if (IS_ENABLED(CONFIG_SMP)) |
526 | while (test_bit(NAPI_STATE_SCHED, &n->state)) | |
527 | msleep(1); | |
528 | else | |
529 | barrier(); | |
c264c3de | 530 | } |
c264c3de | 531 | |
d94d9fee | 532 | enum netdev_queue_state_t { |
73466498 TH |
533 | __QUEUE_STATE_DRV_XOFF, |
534 | __QUEUE_STATE_STACK_XOFF, | |
c3f26a26 | 535 | __QUEUE_STATE_FROZEN, |
79d16385 | 536 | }; |
8e2f1a63 DB |
537 | |
538 | #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) | |
539 | #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) | |
540 | #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) | |
541 | ||
542 | #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) | |
543 | #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ | |
544 | QUEUE_STATE_FROZEN) | |
545 | #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ | |
546 | QUEUE_STATE_FROZEN) | |
547 | ||
73466498 TH |
548 | /* |
549 | * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The | |
550 | * netif_tx_* functions below are used to manipulate this flag. The | |
551 | * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit | |
552 | * queue independently. The netif_xmit_*stopped functions below are called | |
553 | * to check if the queue has been stopped by the driver or stack (either | |
554 | * of the XOFF bits are set in the state). Drivers should not need to call | |
555 | * netif_xmit*stopped functions, they should only be using netif_tx_*. | |
556 | */ | |
79d16385 | 557 | |
bb949fbd | 558 | struct netdev_queue { |
6a321cb3 ED |
559 | /* |
560 | * read mostly part | |
561 | */ | |
bb949fbd | 562 | struct net_device *dev; |
46e5da40 | 563 | struct Qdisc __rcu *qdisc; |
b0e1e646 | 564 | struct Qdisc *qdisc_sleeping; |
ccf5ff69 | 565 | #ifdef CONFIG_SYSFS |
1d24eb48 TH |
566 | struct kobject kobj; |
567 | #endif | |
f2cd2d3e ED |
568 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
569 | int numa_node; | |
570 | #endif | |
6a321cb3 ED |
571 | /* |
572 | * write mostly part | |
573 | */ | |
574 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; | |
575 | int xmit_lock_owner; | |
9d21493b ED |
576 | /* |
577 | * please use this field instead of dev->trans_start | |
578 | */ | |
579 | unsigned long trans_start; | |
ccf5ff69 | 580 | |
581 | /* | |
582 | * Number of TX timeouts for this queue | |
583 | * (/sys/class/net/DEV/Q/trans_timeout) | |
584 | */ | |
585 | unsigned long trans_timeout; | |
114cf580 TH |
586 | |
587 | unsigned long state; | |
588 | ||
589 | #ifdef CONFIG_BQL | |
590 | struct dql dql; | |
591 | #endif | |
822b3b2e | 592 | unsigned long tx_maxrate; |
e8a0464c | 593 | } ____cacheline_aligned_in_smp; |
bb949fbd | 594 | |
f2cd2d3e ED |
595 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
596 | { | |
597 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
598 | return q->numa_node; | |
599 | #else | |
b236da69 | 600 | return NUMA_NO_NODE; |
f2cd2d3e ED |
601 | #endif |
602 | } | |
603 | ||
604 | static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) | |
605 | { | |
606 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
607 | q->numa_node = node; | |
608 | #endif | |
609 | } | |
610 | ||
df334545 | 611 | #ifdef CONFIG_RPS |
0a9627f2 TH |
612 | /* |
613 | * This structure holds an RPS map which can be of variable length. The | |
614 | * map is an array of CPUs. | |
615 | */ | |
616 | struct rps_map { | |
617 | unsigned int len; | |
618 | struct rcu_head rcu; | |
619 | u16 cpus[0]; | |
620 | }; | |
60b778ce | 621 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) |
0a9627f2 | 622 | |
fec5e652 | 623 | /* |
c445477d BH |
624 | * The rps_dev_flow structure contains the mapping of a flow to a CPU, the |
625 | * tail pointer for that CPU's input queue at the time of last enqueue, and | |
626 | * a hardware filter index. | |
fec5e652 TH |
627 | */ |
628 | struct rps_dev_flow { | |
629 | u16 cpu; | |
c445477d | 630 | u16 filter; |
fec5e652 TH |
631 | unsigned int last_qtail; |
632 | }; | |
c445477d | 633 | #define RPS_NO_FILTER 0xffff |
fec5e652 TH |
634 | |
635 | /* | |
636 | * The rps_dev_flow_table structure contains a table of flow mappings. | |
637 | */ | |
638 | struct rps_dev_flow_table { | |
639 | unsigned int mask; | |
640 | struct rcu_head rcu; | |
fec5e652 TH |
641 | struct rps_dev_flow flows[0]; |
642 | }; | |
643 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ | |
60b778ce | 644 | ((_num) * sizeof(struct rps_dev_flow))) |
fec5e652 TH |
645 | |
646 | /* | |
647 | * The rps_sock_flow_table contains mappings of flows to the last CPU | |
648 | * on which they were processed by the application (set in recvmsg). | |
567e4b79 ED |
649 | * Each entry is a 32bit value. Upper part is the high order bits |
650 | * of flow hash, lower part is cpu number. | |
651 | * rps_cpu_mask is used to partition the space, depending on number of | |
652 | * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 | |
653 | * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f, | |
654 | * meaning we use 32-6=26 bits for the hash. | |
fec5e652 TH |
655 | */ |
656 | struct rps_sock_flow_table { | |
567e4b79 | 657 | u32 mask; |
93c1af6c ED |
658 | |
659 | u32 ents[0] ____cacheline_aligned_in_smp; | |
fec5e652 | 660 | }; |
567e4b79 | 661 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) |
fec5e652 TH |
662 | |
663 | #define RPS_NO_CPU 0xffff | |
664 | ||
567e4b79 ED |
665 | extern u32 rps_cpu_mask; |
666 | extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; | |
667 | ||
fec5e652 TH |
668 | static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, |
669 | u32 hash) | |
670 | { | |
671 | if (table && hash) { | |
567e4b79 ED |
672 | unsigned int index = hash & table->mask; |
673 | u32 val = hash & ~rps_cpu_mask; | |
fec5e652 TH |
674 | |
675 | /* We only give a hint, preemption can change cpu under us */ | |
567e4b79 | 676 | val |= raw_smp_processor_id(); |
fec5e652 | 677 | |
567e4b79 ED |
678 | if (table->ents[index] != val) |
679 | table->ents[index] = val; | |
fec5e652 TH |
680 | } |
681 | } | |
682 | ||
c445477d | 683 | #ifdef CONFIG_RFS_ACCEL |
f629d208 JP |
684 | bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, |
685 | u16 filter_id); | |
c445477d | 686 | #endif |
a953be53 | 687 | #endif /* CONFIG_RPS */ |
c445477d | 688 | |
0a9627f2 TH |
689 | /* This structure contains an instance of an RX queue. */ |
690 | struct netdev_rx_queue { | |
a953be53 | 691 | #ifdef CONFIG_RPS |
6e3f7faf ED |
692 | struct rps_map __rcu *rps_map; |
693 | struct rps_dev_flow_table __rcu *rps_flow_table; | |
a953be53 | 694 | #endif |
6e3f7faf | 695 | struct kobject kobj; |
fe822240 | 696 | struct net_device *dev; |
0a9627f2 | 697 | } ____cacheline_aligned_in_smp; |
a953be53 MD |
698 | |
699 | /* | |
700 | * RX queue sysfs structures and functions. | |
701 | */ | |
702 | struct rx_queue_attribute { | |
703 | struct attribute attr; | |
704 | ssize_t (*show)(struct netdev_rx_queue *queue, | |
705 | struct rx_queue_attribute *attr, char *buf); | |
706 | ssize_t (*store)(struct netdev_rx_queue *queue, | |
707 | struct rx_queue_attribute *attr, const char *buf, size_t len); | |
708 | }; | |
d314774c | 709 | |
bf264145 TH |
710 | #ifdef CONFIG_XPS |
711 | /* | |
712 | * This structure holds an XPS map which can be of variable length. The | |
713 | * map is an array of queues. | |
714 | */ | |
715 | struct xps_map { | |
716 | unsigned int len; | |
717 | unsigned int alloc_len; | |
718 | struct rcu_head rcu; | |
719 | u16 queues[0]; | |
720 | }; | |
60b778ce | 721 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
c59f419b HD |
722 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ |
723 | - sizeof(struct xps_map)) / sizeof(u16)) | |
bf264145 TH |
724 | |
725 | /* | |
726 | * This structure holds all XPS maps for device. Maps are indexed by CPU. | |
727 | */ | |
728 | struct xps_dev_maps { | |
729 | struct rcu_head rcu; | |
a4177869 | 730 | struct xps_map __rcu *cpu_map[0]; |
bf264145 TH |
731 | }; |
732 | #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ | |
733 | (nr_cpu_ids * sizeof(struct xps_map *))) | |
734 | #endif /* CONFIG_XPS */ | |
735 | ||
4f57c087 JF |
736 | #define TC_MAX_QUEUE 16 |
737 | #define TC_BITMASK 15 | |
738 | /* HW offloaded queuing disciplines txq count and offset maps */ | |
739 | struct netdev_tc_txq { | |
740 | u16 count; | |
741 | u16 offset; | |
742 | }; | |
743 | ||
68bad94e NP |
744 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
745 | /* | |
746 | * This structure is to hold information about the device | |
747 | * configured to run FCoE protocol stack. | |
748 | */ | |
749 | struct netdev_fcoe_hbainfo { | |
750 | char manufacturer[64]; | |
751 | char serial_number[64]; | |
752 | char hardware_version[64]; | |
753 | char driver_version[64]; | |
754 | char optionrom_version[64]; | |
755 | char firmware_version[64]; | |
756 | char model[256]; | |
757 | char model_description[256]; | |
758 | }; | |
759 | #endif | |
760 | ||
02637fce | 761 | #define MAX_PHYS_ITEM_ID_LEN 32 |
66b52b0d | 762 | |
02637fce JP |
763 | /* This structure holds a unique identifier to identify some |
764 | * physical item (port for example) used by a netdevice. | |
66b52b0d | 765 | */ |
02637fce JP |
766 | struct netdev_phys_item_id { |
767 | unsigned char id[MAX_PHYS_ITEM_ID_LEN]; | |
66b52b0d JP |
768 | unsigned char id_len; |
769 | }; | |
770 | ||
d754f98b SF |
771 | static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, |
772 | struct netdev_phys_item_id *b) | |
773 | { | |
774 | return a->id_len == b->id_len && | |
775 | memcmp(a->id, b->id, a->id_len) == 0; | |
776 | } | |
777 | ||
99932d4f DB |
778 | typedef u16 (*select_queue_fallback_t)(struct net_device *dev, |
779 | struct sk_buff *skb); | |
780 | ||
d314774c SH |
781 | /* |
782 | * This structure defines the management hooks for network devices. | |
00829823 SH |
783 | * The following hooks can be defined; unless noted otherwise, they are |
784 | * optional and can be filled with a null pointer. | |
d314774c SH |
785 | * |
786 | * int (*ndo_init)(struct net_device *dev); | |
787 | * This function is called once when network device is registered. | |
788 | * The network device can use this to any late stage initializaton | |
789 | * or semantic validattion. It can fail with an error code which will | |
790 | * be propogated back to register_netdev | |
791 | * | |
792 | * void (*ndo_uninit)(struct net_device *dev); | |
793 | * This function is called when device is unregistered or when registration | |
794 | * fails. It is not called if init fails. | |
795 | * | |
796 | * int (*ndo_open)(struct net_device *dev); | |
797 | * This function is called when network device transistions to the up | |
798 | * state. | |
799 | * | |
800 | * int (*ndo_stop)(struct net_device *dev); | |
801 | * This function is called when network device transistions to the down | |
802 | * state. | |
803 | * | |
dc1f8bf6 SH |
804 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
805 | * struct net_device *dev); | |
00829823 | 806 | * Called when a packet needs to be transmitted. |
e79d8429 RR |
807 | * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop |
808 | * the queue before that can happen; it's for obsolete devices and weird | |
809 | * corner cases, but the stack really does a non-trivial amount | |
810 | * of useless work if you return NETDEV_TX_BUSY. | |
dc1f8bf6 | 811 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) |
00829823 SH |
812 | * Required can not be NULL. |
813 | * | |
cdba756f ED |
814 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
815 | * netdev_features_t features); | |
816 | * Adjusts the requested feature flags according to device-specific | |
817 | * constraints, and returns the resulting flags. Must not modify | |
818 | * the device state. | |
819 | * | |
f663dd9a | 820 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
99932d4f | 821 | * void *accel_priv, select_queue_fallback_t fallback); |
00829823 SH |
822 | * Called to decide which queue to when device supports multiple |
823 | * transmit queues. | |
824 | * | |
d314774c SH |
825 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); |
826 | * This function is called to allow device receiver to make | |
827 | * changes to configuration when multicast or promiscious is enabled. | |
828 | * | |
829 | * void (*ndo_set_rx_mode)(struct net_device *dev); | |
830 | * This function is called device changes address list filtering. | |
01789349 JP |
831 | * If driver handles unicast address filtering, it should set |
832 | * IFF_UNICAST_FLT to its priv_flags. | |
d314774c SH |
833 | * |
834 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | |
835 | * This function is called when the Media Access Control address | |
37b607c5 | 836 | * needs to be changed. If this interface is not defined, the |
d314774c SH |
837 | * mac address can not be changed. |
838 | * | |
839 | * int (*ndo_validate_addr)(struct net_device *dev); | |
840 | * Test if Media Access Control address is valid for the device. | |
841 | * | |
842 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); | |
843 | * Called when a user request an ioctl which can't be handled by | |
844 | * the generic interface code. If not defined ioctl's return | |
845 | * not supported error code. | |
846 | * | |
847 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); | |
848 | * Used to set network devices bus interface parameters. This interface | |
849 | * is retained for legacy reason, new devices should use the bus | |
850 | * interface (PCI) for low level management. | |
851 | * | |
852 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | |
853 | * Called when a user wants to change the Maximum Transfer Unit | |
854 | * of a device. If not defined, any request to change MTU will | |
855 | * will return an error. | |
856 | * | |
00829823 | 857 | * void (*ndo_tx_timeout)(struct net_device *dev); |
d314774c SH |
858 | * Callback uses when the transmitter has not made any progress |
859 | * for dev->watchdog ticks. | |
860 | * | |
3cfde79c | 861 | * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, |
28172739 | 862 | * struct rtnl_link_stats64 *storage); |
d308e38f | 863 | * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
d314774c | 864 | * Called when a user wants to get the network device usage |
be1f3c2c | 865 | * statistics. Drivers must do one of the following: |
3cfde79c BH |
866 | * 1. Define @ndo_get_stats64 to fill in a zero-initialised |
867 | * rtnl_link_stats64 structure passed by the caller. | |
82695d9b | 868 | * 2. Define @ndo_get_stats to update a net_device_stats structure |
be1f3c2c BH |
869 | * (which should normally be dev->stats) and return a pointer to |
870 | * it. The structure may be changed asynchronously only if each | |
871 | * field is written atomically. | |
872 | * 3. Update dev->stats asynchronously and atomically, and define | |
873 | * neither operation. | |
d314774c | 874 | * |
5d632cb7 | 875 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); |
80d5c368 PM |
876 | * If device support VLAN filtering this function is called when a |
877 | * VLAN id is registered. | |
d314774c | 878 | * |
5d632cb7 | 879 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); |
80d5c368 PM |
880 | * If device support VLAN filtering this function is called when a |
881 | * VLAN id is unregistered. | |
d314774c SH |
882 | * |
883 | * void (*ndo_poll_controller)(struct net_device *dev); | |
95c26df8 WM |
884 | * |
885 | * SR-IOV management functions. | |
886 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); | |
887 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); | |
ed616689 SC |
888 | * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, |
889 | * int max_tx_rate); | |
5f8444a3 | 890 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); |
dd461d6a | 891 | * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); |
95c26df8 WM |
892 | * int (*ndo_get_vf_config)(struct net_device *dev, |
893 | * int vf, struct ifla_vf_info *ivf); | |
1d8faf48 | 894 | * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); |
57b61080 SF |
895 | * int (*ndo_set_vf_port)(struct net_device *dev, int vf, |
896 | * struct nlattr *port[]); | |
01a3d796 VZ |
897 | * |
898 | * Enable or disable the VF ability to query its RSS Redirection Table and | |
899 | * Hash Key. This is needed since on some devices VF share this information | |
900 | * with PF and querying it may adduce a theoretical security risk. | |
901 | * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); | |
57b61080 | 902 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); |
4f57c087 JF |
903 | * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) |
904 | * Called to setup 'tc' number of traffic classes in the net device. This | |
905 | * is always called from the stack with the rtnl lock held and netif tx | |
906 | * queues stopped. This allows the netdevice to perform queue management | |
907 | * safely. | |
c445477d | 908 | * |
e9bce845 YZ |
909 | * Fiber Channel over Ethernet (FCoE) offload functions. |
910 | * int (*ndo_fcoe_enable)(struct net_device *dev); | |
911 | * Called when the FCoE protocol stack wants to start using LLD for FCoE | |
912 | * so the underlying device can perform whatever needed configuration or | |
913 | * initialization to support acceleration of FCoE traffic. | |
914 | * | |
915 | * int (*ndo_fcoe_disable)(struct net_device *dev); | |
916 | * Called when the FCoE protocol stack wants to stop using LLD for FCoE | |
917 | * so the underlying device can perform whatever needed clean-ups to | |
918 | * stop supporting acceleration of FCoE traffic. | |
919 | * | |
920 | * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, | |
921 | * struct scatterlist *sgl, unsigned int sgc); | |
922 | * Called when the FCoE Initiator wants to initialize an I/O that | |
923 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
924 | * perform necessary setup and returns 1 to indicate the device is set up | |
925 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
926 | * | |
927 | * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); | |
928 | * Called when the FCoE Initiator/Target is done with the DDPed I/O as | |
929 | * indicated by the FC exchange id 'xid', so the underlying device can | |
930 | * clean up and reuse resources for later DDP requests. | |
931 | * | |
932 | * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, | |
933 | * struct scatterlist *sgl, unsigned int sgc); | |
934 | * Called when the FCoE Target wants to initialize an I/O that | |
935 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
936 | * perform necessary setup and returns 1 to indicate the device is set up | |
937 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
938 | * | |
68bad94e NP |
939 | * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
940 | * struct netdev_fcoe_hbainfo *hbainfo); | |
941 | * Called when the FCoE Protocol stack wants information on the underlying | |
942 | * device. This information is utilized by the FCoE protocol stack to | |
943 | * register attributes with Fiber Channel management service as per the | |
944 | * FC-GS Fabric Device Management Information(FDMI) specification. | |
945 | * | |
e9bce845 YZ |
946 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); |
947 | * Called when the underlying device wants to override default World Wide | |
948 | * Name (WWN) generation mechanism in FCoE protocol stack to pass its own | |
949 | * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE | |
950 | * protocol stack to use. | |
951 | * | |
c445477d BH |
952 | * RFS acceleration. |
953 | * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, | |
954 | * u16 rxq_index, u32 flow_id); | |
955 | * Set hardware filter for RFS. rxq_index is the target queue index; | |
956 | * flow_id is a flow ID to be passed to rps_may_expire_flow() later. | |
957 | * Return the filter ID on success, or a negative error code. | |
fbaec0ea | 958 | * |
8b98a70c | 959 | * Slave management functions (for bridge, bonding, etc). |
fbaec0ea JP |
960 | * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); |
961 | * Called to make another netdev an underling. | |
962 | * | |
963 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); | |
964 | * Called to release previously enslaved netdev. | |
5455c699 MM |
965 | * |
966 | * Feature/offload setting functions. | |
c8f44aff | 967 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); |
5455c699 MM |
968 | * Called to update device configuration to new features. Passed |
969 | * feature set might be less than what was returned by ndo_fix_features()). | |
970 | * Must return >0 or -errno if it changed dev->features itself. | |
971 | * | |
edc7d573 | 972 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
973 | * struct net_device *dev, | |
f6f6424b | 974 | * const unsigned char *addr, u16 vid, u16 flags) |
77162022 | 975 | * Adds an FDB entry to dev for addr. |
1690be63 VY |
976 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
977 | * struct net_device *dev, | |
f6f6424b | 978 | * const unsigned char *addr, u16 vid) |
77162022 JF |
979 | * Deletes the FDB entry from dev coresponding to addr. |
980 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, | |
5d5eacb3 JHS |
981 | * struct net_device *dev, struct net_device *filter_dev, |
982 | * int idx) | |
77162022 JF |
983 | * Used to add FDB entries to dump requests. Implementers should add |
984 | * entries to skb and update idx with the number of entries. | |
e5a55a89 | 985 | * |
ad41faa8 ND |
986 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
987 | * u16 flags) | |
e5a55a89 | 988 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
46c264da ND |
989 | * struct net_device *dev, u32 filter_mask, |
990 | * int nlflags) | |
ad41faa8 ND |
991 | * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, |
992 | * u16 flags); | |
4bf84c35 JP |
993 | * |
994 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | |
995 | * Called to change device carrier. Soft-devices (like dummy, team, etc) | |
996 | * which do not represent real hardware may define this to allow their | |
997 | * userspace components to manage their virtual carrier state. Devices | |
998 | * that determine carrier state from physical hardware properties (eg | |
999 | * network cables) or protocol-dependent mechanisms (eg | |
1000 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. | |
66b52b0d JP |
1001 | * |
1002 | * int (*ndo_get_phys_port_id)(struct net_device *dev, | |
02637fce | 1003 | * struct netdev_phys_item_id *ppid); |
66b52b0d JP |
1004 | * Called to get ID of physical port of this device. If driver does |
1005 | * not implement this, it is assumed that the hw is not able to have | |
1006 | * multiple net devices on single physical port. | |
53cf5275 JG |
1007 | * |
1008 | * void (*ndo_add_vxlan_port)(struct net_device *dev, | |
35e42379 | 1009 | * sa_family_t sa_family, __be16 port); |
53cf5275 JG |
1010 | * Called by vxlan to notiy a driver about the UDP port and socket |
1011 | * address family that vxlan is listnening to. It is called only when | |
1012 | * a new port starts listening. The operation is protected by the | |
1013 | * vxlan_net->sock_lock. | |
1014 | * | |
a8170d2b SA |
1015 | * void (*ndo_add_geneve_port)(struct net_device *dev, |
1016 | * sa_family_t sa_family, __be16 port); | |
1017 | * Called by geneve to notify a driver about the UDP port and socket | |
1018 | * address family that geneve is listnening to. It is called only when | |
1019 | * a new port starts listening. The operation is protected by the | |
1020 | * geneve_net->sock_lock. | |
1021 | * | |
1022 | * void (*ndo_del_geneve_port)(struct net_device *dev, | |
1023 | * sa_family_t sa_family, __be16 port); | |
1024 | * Called by geneve to notify the driver about a UDP port and socket | |
1025 | * address family that geneve is not listening to anymore. The operation | |
1026 | * is protected by the geneve_net->sock_lock. | |
1027 | * | |
53cf5275 | 1028 | * void (*ndo_del_vxlan_port)(struct net_device *dev, |
35e42379 | 1029 | * sa_family_t sa_family, __be16 port); |
53cf5275 JG |
1030 | * Called by vxlan to notify the driver about a UDP port and socket |
1031 | * address family that vxlan is not listening to anymore. The operation | |
1032 | * is protected by the vxlan_net->sock_lock. | |
a6cc0cfa JF |
1033 | * |
1034 | * void* (*ndo_dfwd_add_station)(struct net_device *pdev, | |
1035 | * struct net_device *dev) | |
1036 | * Called by upper layer devices to accelerate switching or other | |
1037 | * station functionality into hardware. 'pdev is the lowerdev | |
1038 | * to use for the offload and 'dev' is the net device that will | |
1039 | * back the offload. Returns a pointer to the private structure | |
1040 | * the upper layer will maintain. | |
1041 | * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) | |
1042 | * Called by upper layer device to delete the station created | |
1043 | * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing | |
1044 | * the station and priv is the structure returned by the add | |
1045 | * operation. | |
1046 | * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb, | |
1047 | * struct net_device *dev, | |
1048 | * void *priv); | |
1049 | * Callback to use for xmit over the accelerated station. This | |
1050 | * is used in place of ndo_start_xmit on accelerated net | |
1051 | * devices. | |
5f35227e JG |
1052 | * netdev_features_t (*ndo_features_check) (struct sk_buff *skb, |
1053 | * struct net_device *dev | |
1054 | * netdev_features_t features); | |
04ffcb25 | 1055 | * Called by core transmit path to determine if device is capable of |
5f35227e JG |
1056 | * performing offload operations on a given packet. This is to give |
1057 | * the device an opportunity to implement any restrictions that cannot | |
1058 | * be otherwise expressed by feature flags. The check is called with | |
1059 | * the set of features that the stack has calculated and it returns | |
1060 | * those the driver believes to be appropriate. | |
822b3b2e JF |
1061 | * int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1062 | * int queue_index, u32 maxrate); | |
1063 | * Called when a user wants to set a max-rate limitation of specific | |
1064 | * TX queue. | |
a54acb3a ND |
1065 | * int (*ndo_get_iflink)(const struct net_device *dev); |
1066 | * Called to get the iflink value of this device. | |
d746d707 AK |
1067 | * void (*ndo_change_proto_down)(struct net_device *dev, |
1068 | * bool proto_down); | |
1069 | * This function is used to pass protocol port error state information | |
1070 | * to the switch driver. The switch driver can react to the proto_down | |
1071 | * by doing a phys down on the associated switch port. | |
fc4099f1 PS |
1072 | * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); |
1073 | * This function is used to get egress tunnel information for given skb. | |
1074 | * This is useful for retrieving outer tunnel header parameters while | |
1075 | * sampling packet. | |
d746d707 | 1076 | * |
d314774c SH |
1077 | */ |
1078 | struct net_device_ops { | |
1079 | int (*ndo_init)(struct net_device *dev); | |
1080 | void (*ndo_uninit)(struct net_device *dev); | |
1081 | int (*ndo_open)(struct net_device *dev); | |
1082 | int (*ndo_stop)(struct net_device *dev); | |
cdba756f ED |
1083 | netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
1084 | struct net_device *dev); | |
1085 | netdev_features_t (*ndo_features_check)(struct sk_buff *skb, | |
1086 | struct net_device *dev, | |
1087 | netdev_features_t features); | |
00829823 | 1088 | u16 (*ndo_select_queue)(struct net_device *dev, |
f663dd9a | 1089 | struct sk_buff *skb, |
99932d4f DB |
1090 | void *accel_priv, |
1091 | select_queue_fallback_t fallback); | |
d314774c SH |
1092 | void (*ndo_change_rx_flags)(struct net_device *dev, |
1093 | int flags); | |
d314774c | 1094 | void (*ndo_set_rx_mode)(struct net_device *dev); |
d314774c SH |
1095 | int (*ndo_set_mac_address)(struct net_device *dev, |
1096 | void *addr); | |
d314774c | 1097 | int (*ndo_validate_addr)(struct net_device *dev); |
d314774c SH |
1098 | int (*ndo_do_ioctl)(struct net_device *dev, |
1099 | struct ifreq *ifr, int cmd); | |
d314774c SH |
1100 | int (*ndo_set_config)(struct net_device *dev, |
1101 | struct ifmap *map); | |
00829823 SH |
1102 | int (*ndo_change_mtu)(struct net_device *dev, |
1103 | int new_mtu); | |
1104 | int (*ndo_neigh_setup)(struct net_device *dev, | |
1105 | struct neigh_parms *); | |
d314774c SH |
1106 | void (*ndo_tx_timeout) (struct net_device *dev); |
1107 | ||
28172739 ED |
1108 | struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, |
1109 | struct rtnl_link_stats64 *storage); | |
d314774c SH |
1110 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
1111 | ||
8e586137 | 1112 | int (*ndo_vlan_rx_add_vid)(struct net_device *dev, |
80d5c368 | 1113 | __be16 proto, u16 vid); |
8e586137 | 1114 | int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, |
80d5c368 | 1115 | __be16 proto, u16 vid); |
d314774c | 1116 | #ifdef CONFIG_NET_POLL_CONTROLLER |
d314774c | 1117 | void (*ndo_poll_controller)(struct net_device *dev); |
4247e161 | 1118 | int (*ndo_netpoll_setup)(struct net_device *dev, |
a8779ec1 | 1119 | struct netpoll_info *info); |
0e34e931 | 1120 | void (*ndo_netpoll_cleanup)(struct net_device *dev); |
06021292 | 1121 | #endif |
e0d1095a | 1122 | #ifdef CONFIG_NET_RX_BUSY_POLL |
8b80cda5 | 1123 | int (*ndo_busy_poll)(struct napi_struct *dev); |
d314774c | 1124 | #endif |
95c26df8 WM |
1125 | int (*ndo_set_vf_mac)(struct net_device *dev, |
1126 | int queue, u8 *mac); | |
1127 | int (*ndo_set_vf_vlan)(struct net_device *dev, | |
1128 | int queue, u16 vlan, u8 qos); | |
ed616689 SC |
1129 | int (*ndo_set_vf_rate)(struct net_device *dev, |
1130 | int vf, int min_tx_rate, | |
1131 | int max_tx_rate); | |
5f8444a3 GR |
1132 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, |
1133 | int vf, bool setting); | |
dd461d6a HS |
1134 | int (*ndo_set_vf_trust)(struct net_device *dev, |
1135 | int vf, bool setting); | |
95c26df8 WM |
1136 | int (*ndo_get_vf_config)(struct net_device *dev, |
1137 | int vf, | |
1138 | struct ifla_vf_info *ivf); | |
1d8faf48 RE |
1139 | int (*ndo_set_vf_link_state)(struct net_device *dev, |
1140 | int vf, int link_state); | |
3b766cd8 EBE |
1141 | int (*ndo_get_vf_stats)(struct net_device *dev, |
1142 | int vf, | |
1143 | struct ifla_vf_stats | |
1144 | *vf_stats); | |
57b61080 SF |
1145 | int (*ndo_set_vf_port)(struct net_device *dev, |
1146 | int vf, | |
1147 | struct nlattr *port[]); | |
1148 | int (*ndo_get_vf_port)(struct net_device *dev, | |
1149 | int vf, struct sk_buff *skb); | |
01a3d796 VZ |
1150 | int (*ndo_set_vf_rss_query_en)( |
1151 | struct net_device *dev, | |
1152 | int vf, bool setting); | |
4f57c087 | 1153 | int (*ndo_setup_tc)(struct net_device *dev, u8 tc); |
d11ead75 | 1154 | #if IS_ENABLED(CONFIG_FCOE) |
cb454399 YZ |
1155 | int (*ndo_fcoe_enable)(struct net_device *dev); |
1156 | int (*ndo_fcoe_disable)(struct net_device *dev); | |
4d288d57 YZ |
1157 | int (*ndo_fcoe_ddp_setup)(struct net_device *dev, |
1158 | u16 xid, | |
1159 | struct scatterlist *sgl, | |
1160 | unsigned int sgc); | |
1161 | int (*ndo_fcoe_ddp_done)(struct net_device *dev, | |
1162 | u16 xid); | |
6247e086 YZ |
1163 | int (*ndo_fcoe_ddp_target)(struct net_device *dev, |
1164 | u16 xid, | |
1165 | struct scatterlist *sgl, | |
1166 | unsigned int sgc); | |
68bad94e NP |
1167 | int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
1168 | struct netdev_fcoe_hbainfo *hbainfo); | |
3c9c36bc BPG |
1169 | #endif |
1170 | ||
d11ead75 | 1171 | #if IS_ENABLED(CONFIG_LIBFCOE) |
df5c7945 YZ |
1172 | #define NETDEV_FCOE_WWNN 0 |
1173 | #define NETDEV_FCOE_WWPN 1 | |
1174 | int (*ndo_fcoe_get_wwn)(struct net_device *dev, | |
1175 | u64 *wwn, int type); | |
4d288d57 | 1176 | #endif |
3c9c36bc | 1177 | |
c445477d BH |
1178 | #ifdef CONFIG_RFS_ACCEL |
1179 | int (*ndo_rx_flow_steer)(struct net_device *dev, | |
1180 | const struct sk_buff *skb, | |
1181 | u16 rxq_index, | |
1182 | u32 flow_id); | |
1183 | #endif | |
fbaec0ea JP |
1184 | int (*ndo_add_slave)(struct net_device *dev, |
1185 | struct net_device *slave_dev); | |
1186 | int (*ndo_del_slave)(struct net_device *dev, | |
1187 | struct net_device *slave_dev); | |
c8f44aff MM |
1188 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
1189 | netdev_features_t features); | |
5455c699 | 1190 | int (*ndo_set_features)(struct net_device *dev, |
c8f44aff | 1191 | netdev_features_t features); |
da6a8fa0 | 1192 | int (*ndo_neigh_construct)(struct neighbour *n); |
447f2191 | 1193 | void (*ndo_neigh_destroy)(struct neighbour *n); |
77162022 JF |
1194 | |
1195 | int (*ndo_fdb_add)(struct ndmsg *ndm, | |
edc7d573 | 1196 | struct nlattr *tb[], |
77162022 | 1197 | struct net_device *dev, |
6b6e2725 | 1198 | const unsigned char *addr, |
f6f6424b | 1199 | u16 vid, |
77162022 JF |
1200 | u16 flags); |
1201 | int (*ndo_fdb_del)(struct ndmsg *ndm, | |
1690be63 | 1202 | struct nlattr *tb[], |
77162022 | 1203 | struct net_device *dev, |
f6f6424b JP |
1204 | const unsigned char *addr, |
1205 | u16 vid); | |
77162022 JF |
1206 | int (*ndo_fdb_dump)(struct sk_buff *skb, |
1207 | struct netlink_callback *cb, | |
1208 | struct net_device *dev, | |
5d5eacb3 | 1209 | struct net_device *filter_dev, |
77162022 | 1210 | int idx); |
e5a55a89 JF |
1211 | |
1212 | int (*ndo_bridge_setlink)(struct net_device *dev, | |
add511b3 RP |
1213 | struct nlmsghdr *nlh, |
1214 | u16 flags); | |
e5a55a89 JF |
1215 | int (*ndo_bridge_getlink)(struct sk_buff *skb, |
1216 | u32 pid, u32 seq, | |
6cbdceeb | 1217 | struct net_device *dev, |
46c264da ND |
1218 | u32 filter_mask, |
1219 | int nlflags); | |
407af329 | 1220 | int (*ndo_bridge_dellink)(struct net_device *dev, |
add511b3 RP |
1221 | struct nlmsghdr *nlh, |
1222 | u16 flags); | |
4bf84c35 JP |
1223 | int (*ndo_change_carrier)(struct net_device *dev, |
1224 | bool new_carrier); | |
66b52b0d | 1225 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
02637fce | 1226 | struct netdev_phys_item_id *ppid); |
db24a904 DA |
1227 | int (*ndo_get_phys_port_name)(struct net_device *dev, |
1228 | char *name, size_t len); | |
53cf5275 JG |
1229 | void (*ndo_add_vxlan_port)(struct net_device *dev, |
1230 | sa_family_t sa_family, | |
35e42379 | 1231 | __be16 port); |
53cf5275 JG |
1232 | void (*ndo_del_vxlan_port)(struct net_device *dev, |
1233 | sa_family_t sa_family, | |
35e42379 | 1234 | __be16 port); |
a8170d2b SA |
1235 | void (*ndo_add_geneve_port)(struct net_device *dev, |
1236 | sa_family_t sa_family, | |
1237 | __be16 port); | |
1238 | void (*ndo_del_geneve_port)(struct net_device *dev, | |
1239 | sa_family_t sa_family, | |
1240 | __be16 port); | |
a6cc0cfa JF |
1241 | void* (*ndo_dfwd_add_station)(struct net_device *pdev, |
1242 | struct net_device *dev); | |
1243 | void (*ndo_dfwd_del_station)(struct net_device *pdev, | |
1244 | void *priv); | |
1245 | ||
1246 | netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, | |
1247 | struct net_device *dev, | |
1248 | void *priv); | |
25175ba5 | 1249 | int (*ndo_get_lock_subclass)(struct net_device *dev); |
822b3b2e JF |
1250 | int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1251 | int queue_index, | |
1252 | u32 maxrate); | |
a54acb3a | 1253 | int (*ndo_get_iflink)(const struct net_device *dev); |
d746d707 AK |
1254 | int (*ndo_change_proto_down)(struct net_device *dev, |
1255 | bool proto_down); | |
fc4099f1 PS |
1256 | int (*ndo_fill_metadata_dst)(struct net_device *dev, |
1257 | struct sk_buff *skb); | |
d314774c SH |
1258 | }; |
1259 | ||
7aa98047 LR |
1260 | /** |
1261 | * enum net_device_priv_flags - &struct net_device priv_flags | |
1262 | * | |
1263 | * These are the &struct net_device, they are only set internally | |
1264 | * by drivers and used in the kernel. These flags are invisible to | |
1265 | * userspace, this means that the order of these flags can change | |
1266 | * during any kernel release. | |
1267 | * | |
1268 | * You should have a pretty good reason to be extending these flags. | |
1269 | * | |
1270 | * @IFF_802_1Q_VLAN: 802.1Q VLAN device | |
1271 | * @IFF_EBRIDGE: Ethernet bridging device | |
7aa98047 | 1272 | * @IFF_BONDING: bonding master or slave |
7aa98047 | 1273 | * @IFF_ISATAP: ISATAP interface (RFC4214) |
7aa98047 LR |
1274 | * @IFF_WAN_HDLC: WAN HDLC device |
1275 | * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to | |
1276 | * release skb->dst | |
1277 | * @IFF_DONT_BRIDGE: disallow bridging this ether dev | |
1278 | * @IFF_DISABLE_NETPOLL: disable netpoll at run-time | |
1279 | * @IFF_MACVLAN_PORT: device used as macvlan port | |
1280 | * @IFF_BRIDGE_PORT: device used as bridge port | |
1281 | * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port | |
1282 | * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit | |
1283 | * @IFF_UNICAST_FLT: Supports unicast filtering | |
1284 | * @IFF_TEAM_PORT: device used as team port | |
1285 | * @IFF_SUPP_NOFCS: device supports sending custom FCS | |
1286 | * @IFF_LIVE_ADDR_CHANGE: device supports hardware address | |
1287 | * change when it's running | |
1288 | * @IFF_MACVLAN: Macvlan device | |
007979ea | 1289 | * @IFF_L3MDEV_MASTER: device is an L3 master device |
fa8187c9 | 1290 | * @IFF_NO_QUEUE: device can run without qdisc attached |
35d4e172 | 1291 | * @IFF_OPENVSWITCH: device is a Open vSwitch master |
fee6d4c7 | 1292 | * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device |
c981e421 | 1293 | * @IFF_TEAM: device is a team device |
7aa98047 LR |
1294 | */ |
1295 | enum netdev_priv_flags { | |
1296 | IFF_802_1Q_VLAN = 1<<0, | |
1297 | IFF_EBRIDGE = 1<<1, | |
0dc1549b JP |
1298 | IFF_BONDING = 1<<2, |
1299 | IFF_ISATAP = 1<<3, | |
1300 | IFF_WAN_HDLC = 1<<4, | |
1301 | IFF_XMIT_DST_RELEASE = 1<<5, | |
1302 | IFF_DONT_BRIDGE = 1<<6, | |
1303 | IFF_DISABLE_NETPOLL = 1<<7, | |
1304 | IFF_MACVLAN_PORT = 1<<8, | |
1305 | IFF_BRIDGE_PORT = 1<<9, | |
1306 | IFF_OVS_DATAPATH = 1<<10, | |
1307 | IFF_TX_SKB_SHARING = 1<<11, | |
1308 | IFF_UNICAST_FLT = 1<<12, | |
1309 | IFF_TEAM_PORT = 1<<13, | |
1310 | IFF_SUPP_NOFCS = 1<<14, | |
1311 | IFF_LIVE_ADDR_CHANGE = 1<<15, | |
1312 | IFF_MACVLAN = 1<<16, | |
1313 | IFF_XMIT_DST_RELEASE_PERM = 1<<17, | |
1314 | IFF_IPVLAN_MASTER = 1<<18, | |
1315 | IFF_IPVLAN_SLAVE = 1<<19, | |
007979ea | 1316 | IFF_L3MDEV_MASTER = 1<<20, |
0dc1549b JP |
1317 | IFF_NO_QUEUE = 1<<21, |
1318 | IFF_OPENVSWITCH = 1<<22, | |
fee6d4c7 | 1319 | IFF_L3MDEV_SLAVE = 1<<23, |
c981e421 | 1320 | IFF_TEAM = 1<<24, |
7aa98047 LR |
1321 | }; |
1322 | ||
1323 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | |
1324 | #define IFF_EBRIDGE IFF_EBRIDGE | |
7aa98047 | 1325 | #define IFF_BONDING IFF_BONDING |
7aa98047 | 1326 | #define IFF_ISATAP IFF_ISATAP |
7aa98047 LR |
1327 | #define IFF_WAN_HDLC IFF_WAN_HDLC |
1328 | #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE | |
1329 | #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE | |
1330 | #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL | |
1331 | #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT | |
1332 | #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT | |
1333 | #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH | |
1334 | #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING | |
1335 | #define IFF_UNICAST_FLT IFF_UNICAST_FLT | |
1336 | #define IFF_TEAM_PORT IFF_TEAM_PORT | |
1337 | #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS | |
1338 | #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE | |
1339 | #define IFF_MACVLAN IFF_MACVLAN | |
02875878 | 1340 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM |
2ad7bf36 MB |
1341 | #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER |
1342 | #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE | |
007979ea | 1343 | #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER |
fa8187c9 | 1344 | #define IFF_NO_QUEUE IFF_NO_QUEUE |
35d4e172 | 1345 | #define IFF_OPENVSWITCH IFF_OPENVSWITCH |
8f25348b | 1346 | #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE |
c981e421 | 1347 | #define IFF_TEAM IFF_TEAM |
7aa98047 | 1348 | |
536721b1 KK |
1349 | /** |
1350 | * struct net_device - The DEVICE structure. | |
1351 | * Actually, this whole structure is a big mistake. It mixes I/O | |
1352 | * data with strictly "high-level" data, and it has to know about | |
1353 | * almost every data structure used in the INET module. | |
1354 | * | |
1355 | * @name: This is the first field of the "visible" part of this structure | |
1356 | * (i.e. as seen by users in the "Space.c" file). It is the name | |
1357 | * of the interface. | |
1358 | * | |
1359 | * @name_hlist: Device name hash chain, please keep it close to name[] | |
1360 | * @ifalias: SNMP alias | |
1361 | * @mem_end: Shared memory end | |
1362 | * @mem_start: Shared memory start | |
1363 | * @base_addr: Device I/O address | |
1364 | * @irq: Device IRQ number | |
1365 | * | |
14ffbbb8 TG |
1366 | * @carrier_changes: Stats to monitor carrier on<->off transitions |
1367 | * | |
536721b1 KK |
1368 | * @state: Generic network queuing layer state, see netdev_state_t |
1369 | * @dev_list: The global list of network devices | |
1370 | * @napi_list: List entry, that is used for polling napi devices | |
1371 | * @unreg_list: List entry, that is used, when we are unregistering the | |
1372 | * device, see the function unregister_netdev | |
1373 | * @close_list: List entry, that is used, when we are closing the device | |
1374 | * | |
1375 | * @adj_list: Directly linked devices, like slaves for bonding | |
1376 | * @all_adj_list: All linked devices, *including* neighbours | |
1377 | * @features: Currently active device features | |
1378 | * @hw_features: User-changeable features | |
1379 | * | |
1380 | * @wanted_features: User-requested features | |
1381 | * @vlan_features: Mask of features inheritable by VLAN devices | |
1382 | * | |
1383 | * @hw_enc_features: Mask of features inherited by encapsulating devices | |
1384 | * This field indicates what encapsulation | |
1385 | * offloads the hardware is capable of doing, | |
1386 | * and drivers will need to set them appropriately. | |
1387 | * | |
1388 | * @mpls_features: Mask of features inheritable by MPLS | |
1389 | * | |
1390 | * @ifindex: interface index | |
388069d3 | 1391 | * @group: The group, that the device belongs to |
536721b1 KK |
1392 | * |
1393 | * @stats: Statistics struct, which was left as a legacy, use | |
1394 | * rtnl_link_stats64 instead | |
1395 | * | |
1396 | * @rx_dropped: Dropped packets by core network, | |
1397 | * do not use this in drivers | |
1398 | * @tx_dropped: Dropped packets by core network, | |
1399 | * do not use this in drivers | |
1400 | * | |
536721b1 KK |
1401 | * @wireless_handlers: List of functions to handle Wireless Extensions, |
1402 | * instead of ioctl, | |
1403 | * see <net/iw_handler.h> for details. | |
1404 | * @wireless_data: Instance data managed by the core of wireless extensions | |
1405 | * | |
1406 | * @netdev_ops: Includes several pointers to callbacks, | |
1407 | * if one wants to override the ndo_*() functions | |
1408 | * @ethtool_ops: Management operations | |
d476059e | 1409 | * @header_ops: Includes callbacks for creating,parsing,caching,etc |
536721b1 KK |
1410 | * of Layer 2 headers. |
1411 | * | |
1412 | * @flags: Interface flags (a la BSD) | |
1413 | * @priv_flags: Like 'flags' but invisible to userspace, | |
1414 | * see if.h for the definitions | |
1415 | * @gflags: Global flags ( kept as legacy ) | |
1416 | * @padded: How much padding added by alloc_netdev() | |
1417 | * @operstate: RFC2863 operstate | |
1418 | * @link_mode: Mapping policy to operstate | |
1419 | * @if_port: Selectable AUI, TP, ... | |
1420 | * @dma: DMA channel | |
1421 | * @mtu: Interface MTU value | |
1422 | * @type: Interface hardware type | |
880621c2 MB |
1423 | * @hard_header_len: Hardware header length, which means that this is the |
1424 | * minimum size of a packet. | |
536721b1 KK |
1425 | * |
1426 | * @needed_headroom: Extra headroom the hardware may need, but not in all | |
1427 | * cases can this be guaranteed | |
1428 | * @needed_tailroom: Extra tailroom the hardware may need, but not in all | |
1429 | * cases can this be guaranteed. Some cases also use | |
1430 | * LL_MAX_HEADER instead to allocate the skb | |
1431 | * | |
1432 | * interface address info: | |
1433 | * | |
1434 | * @perm_addr: Permanent hw address | |
1435 | * @addr_assign_type: Hw address assignment type | |
1436 | * @addr_len: Hardware address length | |
1437 | * @neigh_priv_len; Used in neigh_alloc(), | |
1438 | * initialized only in atm/clip.c | |
1439 | * @dev_id: Used to differentiate devices that share | |
1440 | * the same link layer address | |
1441 | * @dev_port: Used to differentiate devices that share | |
1442 | * the same function | |
1443 | * @addr_list_lock: XXX: need comments on this one | |
536721b1 KK |
1444 | * @uc_promisc: Counter, that indicates, that promiscuous mode |
1445 | * has been enabled due to the need to listen to | |
1446 | * additional unicast addresses in a device that | |
1447 | * does not implement ndo_set_rx_mode() | |
14ffbbb8 TG |
1448 | * @uc: unicast mac addresses |
1449 | * @mc: multicast mac addresses | |
1450 | * @dev_addrs: list of device hw addresses | |
1451 | * @queues_kset: Group of all Kobjects in the Tx and RX queues | |
536721b1 KK |
1452 | * @promiscuity: Number of times, the NIC is told to work in |
1453 | * Promiscuous mode, if it becomes 0 the NIC will | |
1454 | * exit from working in Promiscuous mode | |
1455 | * @allmulti: Counter, enables or disables allmulticast mode | |
1456 | * | |
1457 | * @vlan_info: VLAN info | |
1458 | * @dsa_ptr: dsa specific data | |
1459 | * @tipc_ptr: TIPC specific data | |
1460 | * @atalk_ptr: AppleTalk link | |
1461 | * @ip_ptr: IPv4 specific data | |
1462 | * @dn_ptr: DECnet specific data | |
1463 | * @ip6_ptr: IPv6 specific data | |
1464 | * @ax25_ptr: AX.25 specific data | |
1465 | * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering | |
1466 | * | |
1467 | * @last_rx: Time of last Rx | |
1468 | * @dev_addr: Hw address (before bcast, | |
1469 | * because most packets are unicast) | |
1470 | * | |
1471 | * @_rx: Array of RX queues | |
1472 | * @num_rx_queues: Number of RX queues | |
1473 | * allocated at register_netdev() time | |
1474 | * @real_num_rx_queues: Number of RX queues currently active in device | |
1475 | * | |
1476 | * @rx_handler: handler for received packets | |
1477 | * @rx_handler_data: XXX: need comments on this one | |
1478 | * @ingress_queue: XXX: need comments on this one | |
1479 | * @broadcast: hw bcast address | |
1480 | * | |
14ffbbb8 TG |
1481 | * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, |
1482 | * indexed by RX queue number. Assigned by driver. | |
1483 | * This must only be set if the ndo_rx_flow_steer | |
1484 | * operation is defined | |
1485 | * @index_hlist: Device index hash chain | |
1486 | * | |
536721b1 KK |
1487 | * @_tx: Array of TX queues |
1488 | * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time | |
1489 | * @real_num_tx_queues: Number of TX queues currently active in device | |
1490 | * @qdisc: Root qdisc from userspace point of view | |
1491 | * @tx_queue_len: Max frames per queue allowed | |
1492 | * @tx_global_lock: XXX: need comments on this one | |
1493 | * | |
1494 | * @xps_maps: XXX: need comments on this one | |
1495 | * | |
0c4f691f SF |
1496 | * @offload_fwd_mark: Offload device fwding mark |
1497 | * | |
536721b1 KK |
1498 | * @trans_start: Time (in jiffies) of last Tx |
1499 | * @watchdog_timeo: Represents the timeout that is used by | |
1500 | * the watchdog ( see dev_watchdog() ) | |
1501 | * @watchdog_timer: List of timers | |
1502 | * | |
1503 | * @pcpu_refcnt: Number of references to this device | |
1504 | * @todo_list: Delayed register/unregister | |
536721b1 KK |
1505 | * @link_watch_list: XXX: need comments on this one |
1506 | * | |
1507 | * @reg_state: Register/unregister state machine | |
1508 | * @dismantle: Device is going to be freed | |
1509 | * @rtnl_link_state: This enum represents the phases of creating | |
1510 | * a new link | |
1511 | * | |
1512 | * @destructor: Called from unregister, | |
1513 | * can be used to call free_netdev | |
1514 | * @npinfo: XXX: need comments on this one | |
1515 | * @nd_net: Network namespace this network device is inside | |
1516 | * | |
1517 | * @ml_priv: Mid-layer private | |
1518 | * @lstats: Loopback statistics | |
1519 | * @tstats: Tunnel statistics | |
1520 | * @dstats: Dummy statistics | |
1521 | * @vstats: Virtual ethernet statistics | |
1522 | * | |
1523 | * @garp_port: GARP | |
1524 | * @mrp_port: MRP | |
1525 | * | |
1526 | * @dev: Class/net/name entry | |
1527 | * @sysfs_groups: Space for optional device, statistics and wireless | |
1528 | * sysfs groups | |
1529 | * | |
1530 | * @sysfs_rx_queue_group: Space for optional per-rx queue attributes | |
1531 | * @rtnl_link_ops: Rtnl_link_ops | |
1532 | * | |
1533 | * @gso_max_size: Maximum size of generic segmentation offload | |
1534 | * @gso_max_segs: Maximum number of segments that can be passed to the | |
1535 | * NIC for GSO | |
fcbeb976 ED |
1536 | * @gso_min_segs: Minimum number of segments that can be passed to the |
1537 | * NIC for GSO | |
536721b1 KK |
1538 | * |
1539 | * @dcbnl_ops: Data Center Bridging netlink ops | |
1540 | * @num_tc: Number of traffic classes in the net device | |
1541 | * @tc_to_txq: XXX: need comments on this one | |
1542 | * @prio_tc_map XXX: need comments on this one | |
1543 | * | |
1544 | * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp | |
1545 | * | |
1546 | * @priomap: XXX: need comments on this one | |
1547 | * @phydev: Physical device may attach itself | |
1548 | * for hardware timestamping | |
1549 | * | |
1550 | * @qdisc_tx_busylock: XXX: need comments on this one | |
1551 | * | |
d746d707 AK |
1552 | * @proto_down: protocol port state information can be sent to the |
1553 | * switch driver and used to set the phys state of the | |
1554 | * switch port. | |
1555 | * | |
1da177e4 LT |
1556 | * FIXME: cleanup struct net_device such that network protocol info |
1557 | * moves out. | |
1558 | */ | |
1559 | ||
d94d9fee | 1560 | struct net_device { |
1da177e4 | 1561 | char name[IFNAMSIZ]; |
9356b8fc | 1562 | struct hlist_node name_hlist; |
0b815a1a | 1563 | char *ifalias; |
1da177e4 LT |
1564 | /* |
1565 | * I/O specific fields | |
1566 | * FIXME: Merge these and struct ifmap into one | |
1567 | */ | |
536721b1 KK |
1568 | unsigned long mem_end; |
1569 | unsigned long mem_start; | |
1570 | unsigned long base_addr; | |
1571 | int irq; | |
1da177e4 | 1572 | |
14ffbbb8 TG |
1573 | atomic_t carrier_changes; |
1574 | ||
1da177e4 | 1575 | /* |
536721b1 KK |
1576 | * Some hardware also needs these fields (state,dev_list, |
1577 | * napi_list,unreg_list,close_list) but they are not | |
1da177e4 LT |
1578 | * part of the usual set specified in Space.c. |
1579 | */ | |
1580 | ||
1da177e4 LT |
1581 | unsigned long state; |
1582 | ||
7562f876 | 1583 | struct list_head dev_list; |
bea3348e | 1584 | struct list_head napi_list; |
44a0873d | 1585 | struct list_head unreg_list; |
5cde2829 | 1586 | struct list_head close_list; |
7866a621 SN |
1587 | struct list_head ptype_all; |
1588 | struct list_head ptype_specific; | |
2f268f12 | 1589 | |
2f268f12 VF |
1590 | struct { |
1591 | struct list_head upper; | |
1592 | struct list_head lower; | |
1593 | } adj_list; | |
1594 | ||
2f268f12 VF |
1595 | struct { |
1596 | struct list_head upper; | |
1597 | struct list_head lower; | |
1598 | } all_adj_list; | |
4c3d5e7b | 1599 | |
c8f44aff | 1600 | netdev_features_t features; |
c8f44aff | 1601 | netdev_features_t hw_features; |
c8f44aff | 1602 | netdev_features_t wanted_features; |
c8f44aff | 1603 | netdev_features_t vlan_features; |
6a674e9c | 1604 | netdev_features_t hw_enc_features; |
0d89d203 | 1605 | netdev_features_t mpls_features; |
04ed3e74 | 1606 | |
1da177e4 | 1607 | int ifindex; |
7a66bbc9 | 1608 | int group; |
1da177e4 | 1609 | |
c45d286e | 1610 | struct net_device_stats stats; |
015f0688 | 1611 | |
015f0688 ED |
1612 | atomic_long_t rx_dropped; |
1613 | atomic_long_t tx_dropped; | |
1da177e4 | 1614 | |
b86e0280 | 1615 | #ifdef CONFIG_WIRELESS_EXT |
1da177e4 | 1616 | const struct iw_handler_def * wireless_handlers; |
1da177e4 | 1617 | struct iw_public_data * wireless_data; |
b86e0280 | 1618 | #endif |
d314774c | 1619 | const struct net_device_ops *netdev_ops; |
76fd8593 | 1620 | const struct ethtool_ops *ethtool_ops; |
4170604f | 1621 | #ifdef CONFIG_NET_SWITCHDEV |
9d47c0a2 | 1622 | const struct switchdev_ops *switchdev_ops; |
4170604f | 1623 | #endif |
1b69c6d0 DA |
1624 | #ifdef CONFIG_NET_L3_MASTER_DEV |
1625 | const struct l3mdev_ops *l3mdev_ops; | |
1626 | #endif | |
1da177e4 | 1627 | |
3b04ddde SH |
1628 | const struct header_ops *header_ops; |
1629 | ||
536721b1 KK |
1630 | unsigned int flags; |
1631 | unsigned int priv_flags; | |
1632 | ||
1da177e4 | 1633 | unsigned short gflags; |
536721b1 | 1634 | unsigned short padded; |
1da177e4 | 1635 | |
536721b1 KK |
1636 | unsigned char operstate; |
1637 | unsigned char link_mode; | |
b00055aa | 1638 | |
536721b1 KK |
1639 | unsigned char if_port; |
1640 | unsigned char dma; | |
bdc220da | 1641 | |
536721b1 KK |
1642 | unsigned int mtu; |
1643 | unsigned short type; | |
1644 | unsigned short hard_header_len; | |
1da177e4 | 1645 | |
f5184d26 JB |
1646 | unsigned short needed_headroom; |
1647 | unsigned short needed_tailroom; | |
1648 | ||
1da177e4 | 1649 | /* Interface address info. */ |
536721b1 KK |
1650 | unsigned char perm_addr[MAX_ADDR_LEN]; |
1651 | unsigned char addr_assign_type; | |
1652 | unsigned char addr_len; | |
a0a9663d | 1653 | unsigned short neigh_priv_len; |
536721b1 KK |
1654 | unsigned short dev_id; |
1655 | unsigned short dev_port; | |
ccffad25 | 1656 | spinlock_t addr_list_lock; |
14ffbbb8 TG |
1657 | unsigned char name_assign_type; |
1658 | bool uc_promisc; | |
536721b1 KK |
1659 | struct netdev_hw_addr_list uc; |
1660 | struct netdev_hw_addr_list mc; | |
1661 | struct netdev_hw_addr_list dev_addrs; | |
1662 | ||
4c3d5e7b ED |
1663 | #ifdef CONFIG_SYSFS |
1664 | struct kset *queues_kset; | |
1665 | #endif | |
9d45abe1 WC |
1666 | unsigned int promiscuity; |
1667 | unsigned int allmulti; | |
1da177e4 | 1668 | |
1da177e4 LT |
1669 | |
1670 | /* Protocol specific pointers */ | |
65ac6a5f | 1671 | |
d11ead75 | 1672 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
536721b1 | 1673 | struct vlan_info __rcu *vlan_info; |
65ac6a5f | 1674 | #endif |
34a430d7 | 1675 | #if IS_ENABLED(CONFIG_NET_DSA) |
536721b1 | 1676 | struct dsa_switch_tree *dsa_ptr; |
37cb0620 YX |
1677 | #endif |
1678 | #if IS_ENABLED(CONFIG_TIPC) | |
536721b1 | 1679 | struct tipc_bearer __rcu *tipc_ptr; |
91da11f8 | 1680 | #endif |
536721b1 KK |
1681 | void *atalk_ptr; |
1682 | struct in_device __rcu *ip_ptr; | |
1683 | struct dn_dev __rcu *dn_ptr; | |
1684 | struct inet6_dev __rcu *ip6_ptr; | |
1685 | void *ax25_ptr; | |
1686 | struct wireless_dev *ieee80211_ptr; | |
98a18b6f | 1687 | struct wpan_dev *ieee802154_ptr; |
03c57747 RS |
1688 | #if IS_ENABLED(CONFIG_MPLS_ROUTING) |
1689 | struct mpls_dev __rcu *mpls_ptr; | |
1690 | #endif | |
1da177e4 | 1691 | |
9356b8fc | 1692 | /* |
cd13539b | 1693 | * Cache lines mostly used on receive path (including eth_type_trans()) |
9356b8fc | 1694 | */ |
536721b1 | 1695 | unsigned long last_rx; |
4dc89133 | 1696 | |
9356b8fc | 1697 | /* Interface address info used in eth_type_trans() */ |
536721b1 | 1698 | unsigned char *dev_addr; |
f001fde5 | 1699 | |
0a9627f2 | 1700 | |
a953be53 | 1701 | #ifdef CONFIG_SYSFS |
0a9627f2 TH |
1702 | struct netdev_rx_queue *_rx; |
1703 | ||
0a9627f2 | 1704 | unsigned int num_rx_queues; |
62fe0b40 | 1705 | unsigned int real_num_rx_queues; |
c445477d | 1706 | |
df334545 | 1707 | #endif |
0a9627f2 | 1708 | |
3b47d303 | 1709 | unsigned long gro_flush_timeout; |
61391cde | 1710 | rx_handler_func_t __rcu *rx_handler; |
1711 | void __rcu *rx_handler_data; | |
e8a0464c | 1712 | |
4cda01e8 | 1713 | #ifdef CONFIG_NET_CLS_ACT |
d2788d34 DB |
1714 | struct tcf_proto __rcu *ingress_cl_list; |
1715 | #endif | |
24824a09 | 1716 | struct netdev_queue __rcu *ingress_queue; |
e687ad60 PN |
1717 | #ifdef CONFIG_NETFILTER_INGRESS |
1718 | struct list_head nf_hooks_ingress; | |
1719 | #endif | |
d2788d34 | 1720 | |
536721b1 | 1721 | unsigned char broadcast[MAX_ADDR_LEN]; |
14ffbbb8 TG |
1722 | #ifdef CONFIG_RFS_ACCEL |
1723 | struct cpu_rmap *rx_cpu_rmap; | |
1724 | #endif | |
1725 | struct hlist_node index_hlist; | |
cd13539b ED |
1726 | |
1727 | /* | |
1728 | * Cache lines mostly used on transmit path | |
1729 | */ | |
e8a0464c DM |
1730 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
1731 | unsigned int num_tx_queues; | |
fd2ea0a7 | 1732 | unsigned int real_num_tx_queues; |
af356afa | 1733 | struct Qdisc *qdisc; |
536721b1 | 1734 | unsigned long tx_queue_len; |
c3f26a26 | 1735 | spinlock_t tx_global_lock; |
14ffbbb8 | 1736 | int watchdog_timeo; |
cd13539b | 1737 | |
bf264145 | 1738 | #ifdef CONFIG_XPS |
a4177869 | 1739 | struct xps_dev_maps __rcu *xps_maps; |
bf264145 | 1740 | #endif |
1f211a1b DB |
1741 | #ifdef CONFIG_NET_CLS_ACT |
1742 | struct tcf_proto __rcu *egress_cl_list; | |
1743 | #endif | |
0c4f691f SF |
1744 | #ifdef CONFIG_NET_SWITCHDEV |
1745 | u32 offload_fwd_mark; | |
1746 | #endif | |
1747 | ||
9356b8fc | 1748 | /* These may be needed for future network-power-down code. */ |
9d21493b ED |
1749 | |
1750 | /* | |
1751 | * trans_start here is expensive for high speed devices on SMP, | |
1752 | * please use netdev_queue->trans_start instead. | |
1753 | */ | |
536721b1 | 1754 | unsigned long trans_start; |
9356b8fc | 1755 | |
9356b8fc ED |
1756 | struct timer_list watchdog_timer; |
1757 | ||
29b4433d | 1758 | int __percpu *pcpu_refcnt; |
1da177e4 | 1759 | struct list_head todo_list; |
1da177e4 | 1760 | |
e014debe | 1761 | struct list_head link_watch_list; |
572a103d | 1762 | |
1da177e4 | 1763 | enum { NETREG_UNINITIALIZED=0, |
b17a7c17 | 1764 | NETREG_REGISTERED, /* completed register_netdevice */ |
1da177e4 LT |
1765 | NETREG_UNREGISTERING, /* called unregister_netdevice */ |
1766 | NETREG_UNREGISTERED, /* completed unregister todo */ | |
1767 | NETREG_RELEASED, /* called free_netdev */ | |
937f1ba5 | 1768 | NETREG_DUMMY, /* dummy device for NAPI poll */ |
449f4544 ED |
1769 | } reg_state:8; |
1770 | ||
536721b1 | 1771 | bool dismantle; |
a2835763 PM |
1772 | |
1773 | enum { | |
1774 | RTNL_LINK_INITIALIZED, | |
1775 | RTNL_LINK_INITIALIZING, | |
1776 | } rtnl_link_state:16; | |
1da177e4 | 1777 | |
d314774c | 1778 | void (*destructor)(struct net_device *dev); |
1da177e4 | 1779 | |
1da177e4 | 1780 | #ifdef CONFIG_NETPOLL |
5fbee843 | 1781 | struct netpoll_info __rcu *npinfo; |
1da177e4 | 1782 | #endif |
eae792b7 | 1783 | |
0c5c9fb5 | 1784 | possible_net_t nd_net; |
4a1c5371 | 1785 | |
4951704b | 1786 | /* mid-layer private */ |
a7855c78 | 1787 | union { |
536721b1 KK |
1788 | void *ml_priv; |
1789 | struct pcpu_lstats __percpu *lstats; | |
8f84985f | 1790 | struct pcpu_sw_netstats __percpu *tstats; |
536721b1 KK |
1791 | struct pcpu_dstats __percpu *dstats; |
1792 | struct pcpu_vstats __percpu *vstats; | |
a7855c78 | 1793 | }; |
536721b1 | 1794 | |
3cc77ec7 | 1795 | struct garp_port __rcu *garp_port; |
febf018d | 1796 | struct mrp_port __rcu *mrp_port; |
1da177e4 | 1797 | |
536721b1 | 1798 | struct device dev; |
0c509a6c | 1799 | const struct attribute_group *sysfs_groups[4]; |
a953be53 | 1800 | const struct attribute_group *sysfs_rx_queue_group; |
38f7b870 | 1801 | |
38f7b870 | 1802 | const struct rtnl_link_ops *rtnl_link_ops; |
f25f4e44 | 1803 | |
82cc1a7a PWJ |
1804 | /* for setting kernel sock attribute on TCP connection setup */ |
1805 | #define GSO_MAX_SIZE 65536 | |
1806 | unsigned int gso_max_size; | |
30b678d8 BH |
1807 | #define GSO_MAX_SEGS 65535 |
1808 | u16 gso_max_segs; | |
fcbeb976 | 1809 | u16 gso_min_segs; |
7a6b6f51 | 1810 | #ifdef CONFIG_DCB |
32953543 | 1811 | const struct dcbnl_rtnl_ops *dcbnl_ops; |
2f90b865 | 1812 | #endif |
4f57c087 JF |
1813 | u8 num_tc; |
1814 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; | |
1815 | u8 prio_tc_map[TC_BITMASK + 1]; | |
2f90b865 | 1816 | |
d11ead75 | 1817 | #if IS_ENABLED(CONFIG_FCOE) |
4d288d57 | 1818 | unsigned int fcoe_ddp_xid; |
5bc1421e | 1819 | #endif |
86f8515f | 1820 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
5bc1421e | 1821 | struct netprio_map __rcu *priomap; |
4d288d57 | 1822 | #endif |
c1f19b51 | 1823 | struct phy_device *phydev; |
23d3b8bf | 1824 | struct lock_class_key *qdisc_tx_busylock; |
d746d707 | 1825 | bool proto_down; |
1da177e4 | 1826 | }; |
43cb76d9 | 1827 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
1da177e4 LT |
1828 | |
1829 | #define NETDEV_ALIGN 32 | |
1da177e4 | 1830 | |
4f57c087 JF |
1831 | static inline |
1832 | int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) | |
1833 | { | |
1834 | return dev->prio_tc_map[prio & TC_BITMASK]; | |
1835 | } | |
1836 | ||
1837 | static inline | |
1838 | int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) | |
1839 | { | |
1840 | if (tc >= dev->num_tc) | |
1841 | return -EINVAL; | |
1842 | ||
1843 | dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; | |
1844 | return 0; | |
1845 | } | |
1846 | ||
1847 | static inline | |
1848 | void netdev_reset_tc(struct net_device *dev) | |
1849 | { | |
1850 | dev->num_tc = 0; | |
1851 | memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); | |
1852 | memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); | |
1853 | } | |
1854 | ||
1855 | static inline | |
1856 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) | |
1857 | { | |
1858 | if (tc >= dev->num_tc) | |
1859 | return -EINVAL; | |
1860 | ||
1861 | dev->tc_to_txq[tc].count = count; | |
1862 | dev->tc_to_txq[tc].offset = offset; | |
1863 | return 0; | |
1864 | } | |
1865 | ||
1866 | static inline | |
1867 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc) | |
1868 | { | |
1869 | if (num_tc > TC_MAX_QUEUE) | |
1870 | return -EINVAL; | |
1871 | ||
1872 | dev->num_tc = num_tc; | |
1873 | return 0; | |
1874 | } | |
1875 | ||
1876 | static inline | |
1877 | int netdev_get_num_tc(struct net_device *dev) | |
1878 | { | |
1879 | return dev->num_tc; | |
1880 | } | |
1881 | ||
e8a0464c DM |
1882 | static inline |
1883 | struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, | |
1884 | unsigned int index) | |
1885 | { | |
1886 | return &dev->_tx[index]; | |
1887 | } | |
1888 | ||
10c51b56 DB |
1889 | static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, |
1890 | const struct sk_buff *skb) | |
1891 | { | |
1892 | return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | |
1893 | } | |
1894 | ||
e8a0464c DM |
1895 | static inline void netdev_for_each_tx_queue(struct net_device *dev, |
1896 | void (*f)(struct net_device *, | |
1897 | struct netdev_queue *, | |
1898 | void *), | |
1899 | void *arg) | |
1900 | { | |
1901 | unsigned int i; | |
1902 | ||
1903 | for (i = 0; i < dev->num_tx_queues; i++) | |
1904 | f(dev, &dev->_tx[i], arg); | |
1905 | } | |
1906 | ||
f629d208 | 1907 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
f663dd9a JW |
1908 | struct sk_buff *skb, |
1909 | void *accel_priv); | |
8c4c49df | 1910 | |
c346dca1 YH |
1911 | /* |
1912 | * Net namespace inlines | |
1913 | */ | |
1914 | static inline | |
1915 | struct net *dev_net(const struct net_device *dev) | |
1916 | { | |
c2d9ba9b | 1917 | return read_pnet(&dev->nd_net); |
c346dca1 YH |
1918 | } |
1919 | ||
1920 | static inline | |
f5aa23fd | 1921 | void dev_net_set(struct net_device *dev, struct net *net) |
c346dca1 | 1922 | { |
0c5c9fb5 | 1923 | write_pnet(&dev->nd_net, net); |
c346dca1 YH |
1924 | } |
1925 | ||
3e8a72d1 | 1926 | static inline bool netdev_uses_dsa(struct net_device *dev) |
cf85d08f | 1927 | { |
3fc88677 | 1928 | #if IS_ENABLED(CONFIG_NET_DSA) |
5aed85ce FF |
1929 | if (dev->dsa_ptr != NULL) |
1930 | return dsa_uses_tagged_protocol(dev->dsa_ptr); | |
396138f0 | 1931 | #endif |
5aed85ce | 1932 | return false; |
396138f0 LB |
1933 | } |
1934 | ||
bea3348e SH |
1935 | /** |
1936 | * netdev_priv - access network device private data | |
1937 | * @dev: network device | |
1938 | * | |
1939 | * Get network device private data | |
1940 | */ | |
6472ce60 | 1941 | static inline void *netdev_priv(const struct net_device *dev) |
1da177e4 | 1942 | { |
1ce8e7b5 | 1943 | return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); |
1da177e4 LT |
1944 | } |
1945 | ||
1da177e4 LT |
1946 | /* Set the sysfs physical device reference for the network logical device |
1947 | * if set prior to registration will cause a symlink during initialization. | |
1948 | */ | |
43cb76d9 | 1949 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) |
1da177e4 | 1950 | |
384912ed | 1951 | /* Set the sysfs device type for the network logical device to allow |
3f79410c | 1952 | * fine-grained identification of different network device types. For |
384912ed MH |
1953 | * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. |
1954 | */ | |
1955 | #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) | |
1956 | ||
82dc3c63 ED |
1957 | /* Default NAPI poll() weight |
1958 | * Device drivers are strongly advised to not use bigger value | |
1959 | */ | |
1960 | #define NAPI_POLL_WEIGHT 64 | |
1961 | ||
3b582cc1 SH |
1962 | /** |
1963 | * netif_napi_add - initialize a napi context | |
1964 | * @dev: network device | |
1965 | * @napi: napi context | |
1966 | * @poll: polling function | |
1967 | * @weight: default weight | |
1968 | * | |
1969 | * netif_napi_add() must be used to initialize a napi context prior to calling | |
1970 | * *any* of the other napi related functions. | |
1971 | */ | |
d565b0a1 HX |
1972 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, |
1973 | int (*poll)(struct napi_struct *, int), int weight); | |
bea3348e | 1974 | |
d64b5e85 ED |
1975 | /** |
1976 | * netif_tx_napi_add - initialize a napi context | |
1977 | * @dev: network device | |
1978 | * @napi: napi context | |
1979 | * @poll: polling function | |
1980 | * @weight: default weight | |
1981 | * | |
1982 | * This variant of netif_napi_add() should be used from drivers using NAPI | |
1983 | * to exclusively poll a TX queue. | |
1984 | * This will avoid we add it into napi_hash[], thus polluting this hash table. | |
1985 | */ | |
1986 | static inline void netif_tx_napi_add(struct net_device *dev, | |
1987 | struct napi_struct *napi, | |
1988 | int (*poll)(struct napi_struct *, int), | |
1989 | int weight) | |
1990 | { | |
1991 | set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); | |
1992 | netif_napi_add(dev, napi, poll, weight); | |
1993 | } | |
1994 | ||
d8156534 AD |
1995 | /** |
1996 | * netif_napi_del - remove a napi context | |
1997 | * @napi: napi context | |
1998 | * | |
1999 | * netif_napi_del() removes a napi context from the network device napi list | |
2000 | */ | |
d565b0a1 HX |
2001 | void netif_napi_del(struct napi_struct *napi); |
2002 | ||
2003 | struct napi_gro_cb { | |
78a478d0 HX |
2004 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ |
2005 | void *frag0; | |
2006 | ||
7489594c HX |
2007 | /* Length of frag0. */ |
2008 | unsigned int frag0_len; | |
2009 | ||
86911732 HX |
2010 | /* This indicates where we are processing relative to skb->data. */ |
2011 | int data_offset; | |
2012 | ||
d565b0a1 | 2013 | /* This is non-zero if the packet cannot be merged with the new skb. */ |
bf5a755f JC |
2014 | u16 flush; |
2015 | ||
2016 | /* Save the IP ID here and check when we get to the transport layer */ | |
2017 | u16 flush_id; | |
d565b0a1 HX |
2018 | |
2019 | /* Number of segments aggregated. */ | |
2e71a6f8 ED |
2020 | u16 count; |
2021 | ||
15e2396d TH |
2022 | /* Start offset for remote checksum offload */ |
2023 | u16 gro_remcsum_start; | |
2024 | ||
2e71a6f8 ED |
2025 | /* jiffies when first packet was created/queued */ |
2026 | unsigned long age; | |
86347245 | 2027 | |
afe93325 | 2028 | /* Used in ipv6_gro_receive() and foo-over-udp */ |
b582ef09 OG |
2029 | u16 proto; |
2030 | ||
baa32ff4 TH |
2031 | /* This is non-zero if the packet may be of the same flow. */ |
2032 | u8 same_flow:1; | |
2033 | ||
b582ef09 | 2034 | /* Used in udp_gro_receive */ |
573e8fca TH |
2035 | u8 udp_mark:1; |
2036 | ||
2037 | /* GRO checksum is valid */ | |
2038 | u8 csum_valid:1; | |
2039 | ||
662880f4 TH |
2040 | /* Number of checksums via CHECKSUM_UNNECESSARY */ |
2041 | u8 csum_cnt:3; | |
c3c7c254 | 2042 | |
baa32ff4 TH |
2043 | /* Free the skb? */ |
2044 | u8 free:2; | |
2045 | #define NAPI_GRO_FREE 1 | |
2046 | #define NAPI_GRO_FREE_STOLEN_HEAD 2 | |
2047 | ||
efc98d08 TH |
2048 | /* Used in foo-over-udp, set in udp[46]_gro_receive */ |
2049 | u8 is_ipv6:1; | |
2050 | ||
baa32ff4 TH |
2051 | /* 7 bit hole */ |
2052 | ||
bf5a755f JC |
2053 | /* used to support CHECKSUM_COMPLETE for tunneling protocols */ |
2054 | __wsum csum; | |
2055 | ||
c3c7c254 ED |
2056 | /* used in skb_gro_receive() slow path */ |
2057 | struct sk_buff *last; | |
d565b0a1 HX |
2058 | }; |
2059 | ||
2060 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) | |
d8156534 | 2061 | |
1da177e4 | 2062 | struct packet_type { |
f2ccd8fa DM |
2063 | __be16 type; /* This is really htons(ether_type). */ |
2064 | struct net_device *dev; /* NULL is wildcarded here */ | |
2065 | int (*func) (struct sk_buff *, | |
2066 | struct net_device *, | |
2067 | struct packet_type *, | |
2068 | struct net_device *); | |
c0de08d0 EL |
2069 | bool (*id_match)(struct packet_type *ptype, |
2070 | struct sock *sk); | |
1da177e4 LT |
2071 | void *af_packet_priv; |
2072 | struct list_head list; | |
2073 | }; | |
2074 | ||
f191a1d1 | 2075 | struct offload_callbacks { |
576a30eb | 2076 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
c8f44aff | 2077 | netdev_features_t features); |
d565b0a1 | 2078 | struct sk_buff **(*gro_receive)(struct sk_buff **head, |
a2b12f3c | 2079 | struct sk_buff *skb); |
299603e8 | 2080 | int (*gro_complete)(struct sk_buff *skb, int nhoff); |
f191a1d1 VY |
2081 | }; |
2082 | ||
2083 | struct packet_offload { | |
2084 | __be16 type; /* This is really htons(ether_type). */ | |
bdef7de4 | 2085 | u16 priority; |
f191a1d1 VY |
2086 | struct offload_callbacks callbacks; |
2087 | struct list_head list; | |
1da177e4 LT |
2088 | }; |
2089 | ||
a2b12f3c TH |
2090 | struct udp_offload; |
2091 | ||
2092 | struct udp_offload_callbacks { | |
2093 | struct sk_buff **(*gro_receive)(struct sk_buff **head, | |
2094 | struct sk_buff *skb, | |
2095 | struct udp_offload *uoff); | |
2096 | int (*gro_complete)(struct sk_buff *skb, | |
2097 | int nhoff, | |
2098 | struct udp_offload *uoff); | |
2099 | }; | |
2100 | ||
b582ef09 OG |
2101 | struct udp_offload { |
2102 | __be16 port; | |
afe93325 | 2103 | u8 ipproto; |
a2b12f3c | 2104 | struct udp_offload_callbacks callbacks; |
b582ef09 OG |
2105 | }; |
2106 | ||
8f84985f LR |
2107 | /* often modified stats are per cpu, other are shared (netdev->stats) */ |
2108 | struct pcpu_sw_netstats { | |
2109 | u64 rx_packets; | |
2110 | u64 rx_bytes; | |
2111 | u64 tx_packets; | |
2112 | u64 tx_bytes; | |
2113 | struct u64_stats_sync syncp; | |
2114 | }; | |
2115 | ||
aabc92bb PNA |
2116 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
2117 | ({ \ | |
2118 | typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ | |
2119 | if (pcpu_stats) { \ | |
2120 | int __cpu; \ | |
2121 | for_each_possible_cpu(__cpu) { \ | |
2122 | typeof(type) *stat; \ | |
2123 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ | |
2124 | u64_stats_init(&stat->syncp); \ | |
2125 | } \ | |
2126 | } \ | |
2127 | pcpu_stats; \ | |
1c213bd2 WC |
2128 | }) |
2129 | ||
aabc92bb | 2130 | #define netdev_alloc_pcpu_stats(type) \ |
326fcfa5 | 2131 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL) |
aabc92bb | 2132 | |
764f5e54 JP |
2133 | enum netdev_lag_tx_type { |
2134 | NETDEV_LAG_TX_TYPE_UNKNOWN, | |
2135 | NETDEV_LAG_TX_TYPE_RANDOM, | |
2136 | NETDEV_LAG_TX_TYPE_BROADCAST, | |
2137 | NETDEV_LAG_TX_TYPE_ROUNDROBIN, | |
2138 | NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, | |
2139 | NETDEV_LAG_TX_TYPE_HASH, | |
2140 | }; | |
2141 | ||
2142 | struct netdev_lag_upper_info { | |
2143 | enum netdev_lag_tx_type tx_type; | |
2144 | }; | |
2145 | ||
fb1b2e3c JP |
2146 | struct netdev_lag_lower_state_info { |
2147 | u8 link_up : 1, | |
2148 | tx_enabled : 1; | |
2149 | }; | |
2150 | ||
1da177e4 LT |
2151 | #include <linux/notifier.h> |
2152 | ||
dcfe1421 AW |
2153 | /* netdevice notifier chain. Please remember to update the rtnetlink |
2154 | * notification exclusion list in rtnetlink_event() when adding new | |
2155 | * types. | |
2156 | */ | |
2157 | #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ | |
2158 | #define NETDEV_DOWN 0x0002 | |
2159 | #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface | |
2160 | detected a hardware crash and restarted | |
2161 | - we can use this eg to kick tcp sessions | |
2162 | once done */ | |
2163 | #define NETDEV_CHANGE 0x0004 /* Notify device state change */ | |
2164 | #define NETDEV_REGISTER 0x0005 | |
2165 | #define NETDEV_UNREGISTER 0x0006 | |
1d486bfb | 2166 | #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */ |
dcfe1421 AW |
2167 | #define NETDEV_CHANGEADDR 0x0008 |
2168 | #define NETDEV_GOING_DOWN 0x0009 | |
2169 | #define NETDEV_CHANGENAME 0x000A | |
2170 | #define NETDEV_FEAT_CHANGE 0x000B | |
2171 | #define NETDEV_BONDING_FAILOVER 0x000C | |
2172 | #define NETDEV_PRE_UP 0x000D | |
2173 | #define NETDEV_PRE_TYPE_CHANGE 0x000E | |
2174 | #define NETDEV_POST_TYPE_CHANGE 0x000F | |
2175 | #define NETDEV_POST_INIT 0x0010 | |
0115e8e3 | 2176 | #define NETDEV_UNREGISTER_FINAL 0x0011 |
dcfe1421 AW |
2177 | #define NETDEV_RELEASE 0x0012 |
2178 | #define NETDEV_NOTIFY_PEERS 0x0013 | |
2179 | #define NETDEV_JOIN 0x0014 | |
42e52bf9 | 2180 | #define NETDEV_CHANGEUPPER 0x0015 |
4aa5dee4 | 2181 | #define NETDEV_RESEND_IGMP 0x0016 |
1d486bfb | 2182 | #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ |
d4261e56 | 2183 | #define NETDEV_CHANGEINFODATA 0x0018 |
61bd3857 | 2184 | #define NETDEV_BONDING_INFO 0x0019 |
573c7ba0 | 2185 | #define NETDEV_PRECHANGEUPPER 0x001A |
04d48266 | 2186 | #define NETDEV_CHANGELOWERSTATE 0x001B |
dcfe1421 | 2187 | |
f629d208 JP |
2188 | int register_netdevice_notifier(struct notifier_block *nb); |
2189 | int unregister_netdevice_notifier(struct notifier_block *nb); | |
351638e7 JP |
2190 | |
2191 | struct netdev_notifier_info { | |
2192 | struct net_device *dev; | |
2193 | }; | |
2194 | ||
be9efd36 JP |
2195 | struct netdev_notifier_change_info { |
2196 | struct netdev_notifier_info info; /* must be first */ | |
2197 | unsigned int flags_changed; | |
2198 | }; | |
2199 | ||
0e4ead9d JP |
2200 | struct netdev_notifier_changeupper_info { |
2201 | struct netdev_notifier_info info; /* must be first */ | |
2202 | struct net_device *upper_dev; /* new upper dev */ | |
2203 | bool master; /* is upper dev master */ | |
2204 | bool linking; /* is the nofication for link or unlink */ | |
29bf24af | 2205 | void *upper_info; /* upper dev info */ |
0e4ead9d JP |
2206 | }; |
2207 | ||
04d48266 JP |
2208 | struct netdev_notifier_changelowerstate_info { |
2209 | struct netdev_notifier_info info; /* must be first */ | |
2210 | void *lower_state_info; /* is lower dev state */ | |
2211 | }; | |
2212 | ||
75538c2b CW |
2213 | static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, |
2214 | struct net_device *dev) | |
2215 | { | |
2216 | info->dev = dev; | |
2217 | } | |
2218 | ||
351638e7 JP |
2219 | static inline struct net_device * |
2220 | netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) | |
2221 | { | |
2222 | return info->dev; | |
2223 | } | |
2224 | ||
f629d208 | 2225 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev); |
dcfe1421 AW |
2226 | |
2227 | ||
1da177e4 LT |
2228 | extern rwlock_t dev_base_lock; /* Device list lock */ |
2229 | ||
881d966b EB |
2230 | #define for_each_netdev(net, d) \ |
2231 | list_for_each_entry(d, &(net)->dev_base_head, dev_list) | |
dcbccbd4 EB |
2232 | #define for_each_netdev_reverse(net, d) \ |
2233 | list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) | |
c6d14c84 ED |
2234 | #define for_each_netdev_rcu(net, d) \ |
2235 | list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) | |
881d966b EB |
2236 | #define for_each_netdev_safe(net, d, n) \ |
2237 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) | |
2238 | #define for_each_netdev_continue(net, d) \ | |
2239 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) | |
254245d2 | 2240 | #define for_each_netdev_continue_rcu(net, d) \ |
2241 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) | |
8a7fbfab | 2242 | #define for_each_netdev_in_bond_rcu(bond, slave) \ |
2243 | for_each_netdev_rcu(&init_net, slave) \ | |
4ccce02e | 2244 | if (netdev_master_upper_dev_get_rcu(slave) == (bond)) |
881d966b | 2245 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) |
7562f876 | 2246 | |
a050c33f DL |
2247 | static inline struct net_device *next_net_device(struct net_device *dev) |
2248 | { | |
2249 | struct list_head *lh; | |
2250 | struct net *net; | |
2251 | ||
c346dca1 | 2252 | net = dev_net(dev); |
a050c33f DL |
2253 | lh = dev->dev_list.next; |
2254 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
2255 | } | |
2256 | ||
ce81b76a ED |
2257 | static inline struct net_device *next_net_device_rcu(struct net_device *dev) |
2258 | { | |
2259 | struct list_head *lh; | |
2260 | struct net *net; | |
2261 | ||
2262 | net = dev_net(dev); | |
ccf43438 | 2263 | lh = rcu_dereference(list_next_rcu(&dev->dev_list)); |
ce81b76a ED |
2264 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
2265 | } | |
2266 | ||
a050c33f DL |
2267 | static inline struct net_device *first_net_device(struct net *net) |
2268 | { | |
2269 | return list_empty(&net->dev_base_head) ? NULL : | |
2270 | net_device_entry(net->dev_base_head.next); | |
2271 | } | |
7562f876 | 2272 | |
ccf43438 ED |
2273 | static inline struct net_device *first_net_device_rcu(struct net *net) |
2274 | { | |
2275 | struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); | |
2276 | ||
2277 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
2278 | } | |
2279 | ||
f629d208 JP |
2280 | int netdev_boot_setup_check(struct net_device *dev); |
2281 | unsigned long netdev_boot_base(const char *prefix, int unit); | |
2282 | struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, | |
2283 | const char *hwaddr); | |
2284 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
2285 | struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
2286 | void dev_add_pack(struct packet_type *pt); | |
2287 | void dev_remove_pack(struct packet_type *pt); | |
2288 | void __dev_remove_pack(struct packet_type *pt); | |
2289 | void dev_add_offload(struct packet_offload *po); | |
2290 | void dev_remove_offload(struct packet_offload *po); | |
f629d208 | 2291 | |
a54acb3a | 2292 | int dev_get_iflink(const struct net_device *dev); |
fc4099f1 | 2293 | int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); |
6c555490 WC |
2294 | struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, |
2295 | unsigned short mask); | |
f629d208 JP |
2296 | struct net_device *dev_get_by_name(struct net *net, const char *name); |
2297 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); | |
2298 | struct net_device *__dev_get_by_name(struct net *net, const char *name); | |
2299 | int dev_alloc_name(struct net_device *dev, const char *name); | |
2300 | int dev_open(struct net_device *dev); | |
2301 | int dev_close(struct net_device *dev); | |
99c4a26a | 2302 | int dev_close_many(struct list_head *head, bool unlink); |
f629d208 | 2303 | void dev_disable_lro(struct net_device *dev); |
0c4b51f0 | 2304 | int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); |
2b4aa3ce | 2305 | int dev_queue_xmit(struct sk_buff *skb); |
f663dd9a | 2306 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); |
f629d208 JP |
2307 | int register_netdevice(struct net_device *dev); |
2308 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); | |
2309 | void unregister_netdevice_many(struct list_head *head); | |
44a0873d ED |
2310 | static inline void unregister_netdevice(struct net_device *dev) |
2311 | { | |
2312 | unregister_netdevice_queue(dev, NULL); | |
2313 | } | |
2314 | ||
f629d208 JP |
2315 | int netdev_refcnt_read(const struct net_device *dev); |
2316 | void free_netdev(struct net_device *dev); | |
74d332c1 | 2317 | void netdev_freemem(struct net_device *dev); |
f629d208 JP |
2318 | void synchronize_net(void); |
2319 | int init_dummy_netdev(struct net_device *dev); | |
937f1ba5 | 2320 | |
f60e5990 | 2321 | DECLARE_PER_CPU(int, xmit_recursion); |
2322 | static inline int dev_recursion_level(void) | |
2323 | { | |
2324 | return this_cpu_read(xmit_recursion); | |
2325 | } | |
2326 | ||
f629d208 JP |
2327 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
2328 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); | |
2329 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); | |
2330 | int netdev_get_name(struct net *net, char *name, int ifindex); | |
2331 | int dev_restart(struct net_device *dev); | |
f629d208 | 2332 | int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); |
86911732 HX |
2333 | |
2334 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | |
2335 | { | |
2336 | return NAPI_GRO_CB(skb)->data_offset; | |
2337 | } | |
2338 | ||
2339 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | |
2340 | { | |
2341 | return skb->len - NAPI_GRO_CB(skb)->data_offset; | |
2342 | } | |
2343 | ||
2344 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | |
2345 | { | |
2346 | NAPI_GRO_CB(skb)->data_offset += len; | |
2347 | } | |
2348 | ||
a5b1cf28 HX |
2349 | static inline void *skb_gro_header_fast(struct sk_buff *skb, |
2350 | unsigned int offset) | |
86911732 | 2351 | { |
a5b1cf28 HX |
2352 | return NAPI_GRO_CB(skb)->frag0 + offset; |
2353 | } | |
78a478d0 | 2354 | |
a5b1cf28 HX |
2355 | static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) |
2356 | { | |
2357 | return NAPI_GRO_CB(skb)->frag0_len < hlen; | |
2358 | } | |
78a478d0 | 2359 | |
a5b1cf28 HX |
2360 | static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, |
2361 | unsigned int offset) | |
2362 | { | |
17dd759c HX |
2363 | if (!pskb_may_pull(skb, hlen)) |
2364 | return NULL; | |
2365 | ||
a5b1cf28 HX |
2366 | NAPI_GRO_CB(skb)->frag0 = NULL; |
2367 | NAPI_GRO_CB(skb)->frag0_len = 0; | |
17dd759c | 2368 | return skb->data + offset; |
86911732 | 2369 | } |
1da177e4 | 2370 | |
36e7b1b8 HX |
2371 | static inline void *skb_gro_network_header(struct sk_buff *skb) |
2372 | { | |
78d3fd0b HX |
2373 | return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + |
2374 | skb_network_offset(skb); | |
36e7b1b8 HX |
2375 | } |
2376 | ||
bf5a755f JC |
2377 | static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, |
2378 | const void *start, unsigned int len) | |
2379 | { | |
573e8fca | 2380 | if (NAPI_GRO_CB(skb)->csum_valid) |
bf5a755f JC |
2381 | NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, |
2382 | csum_partial(start, len, 0)); | |
2383 | } | |
2384 | ||
573e8fca TH |
2385 | /* GRO checksum functions. These are logical equivalents of the normal |
2386 | * checksum functions (in skbuff.h) except that they operate on the GRO | |
2387 | * offsets and fields in sk_buff. | |
2388 | */ | |
2389 | ||
2390 | __sum16 __skb_gro_checksum_complete(struct sk_buff *skb); | |
2391 | ||
15e2396d TH |
2392 | static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb) |
2393 | { | |
b7fe10e5 | 2394 | return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); |
15e2396d TH |
2395 | } |
2396 | ||
573e8fca TH |
2397 | static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, |
2398 | bool zero_okay, | |
2399 | __sum16 check) | |
2400 | { | |
6edec0e6 TH |
2401 | return ((skb->ip_summed != CHECKSUM_PARTIAL || |
2402 | skb_checksum_start_offset(skb) < | |
2403 | skb_gro_offset(skb)) && | |
15e2396d | 2404 | !skb_at_gro_remcsum_start(skb) && |
662880f4 | 2405 | NAPI_GRO_CB(skb)->csum_cnt == 0 && |
573e8fca TH |
2406 | (!zero_okay || check)); |
2407 | } | |
2408 | ||
2409 | static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, | |
2410 | __wsum psum) | |
2411 | { | |
2412 | if (NAPI_GRO_CB(skb)->csum_valid && | |
2413 | !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) | |
2414 | return 0; | |
2415 | ||
2416 | NAPI_GRO_CB(skb)->csum = psum; | |
2417 | ||
2418 | return __skb_gro_checksum_complete(skb); | |
2419 | } | |
2420 | ||
573e8fca TH |
2421 | static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) |
2422 | { | |
662880f4 TH |
2423 | if (NAPI_GRO_CB(skb)->csum_cnt > 0) { |
2424 | /* Consume a checksum from CHECKSUM_UNNECESSARY */ | |
2425 | NAPI_GRO_CB(skb)->csum_cnt--; | |
2426 | } else { | |
2427 | /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we | |
2428 | * verified a new top level checksum or an encapsulated one | |
2429 | * during GRO. This saves work if we fallback to normal path. | |
2430 | */ | |
2431 | __skb_incr_checksum_unnecessary(skb); | |
573e8fca TH |
2432 | } |
2433 | } | |
2434 | ||
2435 | #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ | |
2436 | compute_pseudo) \ | |
2437 | ({ \ | |
2438 | __sum16 __ret = 0; \ | |
2439 | if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ | |
2440 | __ret = __skb_gro_checksum_validate_complete(skb, \ | |
2441 | compute_pseudo(skb, proto)); \ | |
5a212329 TH |
2442 | if (__ret) \ |
2443 | __skb_mark_checksum_bad(skb); \ | |
2444 | else \ | |
573e8fca TH |
2445 | skb_gro_incr_csum_unnecessary(skb); \ |
2446 | __ret; \ | |
2447 | }) | |
2448 | ||
2449 | #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ | |
2450 | __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) | |
2451 | ||
2452 | #define skb_gro_checksum_validate_zero_check(skb, proto, check, \ | |
2453 | compute_pseudo) \ | |
2454 | __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) | |
2455 | ||
2456 | #define skb_gro_checksum_simple_validate(skb) \ | |
2457 | __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) | |
2458 | ||
d96535a1 TH |
2459 | static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) |
2460 | { | |
2461 | return (NAPI_GRO_CB(skb)->csum_cnt == 0 && | |
2462 | !NAPI_GRO_CB(skb)->csum_valid); | |
2463 | } | |
2464 | ||
2465 | static inline void __skb_gro_checksum_convert(struct sk_buff *skb, | |
2466 | __sum16 check, __wsum pseudo) | |
2467 | { | |
2468 | NAPI_GRO_CB(skb)->csum = ~pseudo; | |
2469 | NAPI_GRO_CB(skb)->csum_valid = 1; | |
2470 | } | |
2471 | ||
2472 | #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ | |
2473 | do { \ | |
2474 | if (__skb_gro_checksum_convert_check(skb)) \ | |
2475 | __skb_gro_checksum_convert(skb, check, \ | |
2476 | compute_pseudo(skb, proto)); \ | |
2477 | } while (0) | |
2478 | ||
26c4f7da TH |
2479 | struct gro_remcsum { |
2480 | int offset; | |
2481 | __wsum delta; | |
2482 | }; | |
2483 | ||
2484 | static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) | |
2485 | { | |
846cd667 | 2486 | grc->offset = 0; |
26c4f7da TH |
2487 | grc->delta = 0; |
2488 | } | |
2489 | ||
b7fe10e5 TH |
2490 | static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, |
2491 | unsigned int off, size_t hdrlen, | |
2492 | int start, int offset, | |
2493 | struct gro_remcsum *grc, | |
2494 | bool nopartial) | |
dcdc8994 TH |
2495 | { |
2496 | __wsum delta; | |
b7fe10e5 | 2497 | size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); |
dcdc8994 TH |
2498 | |
2499 | BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); | |
2500 | ||
15e2396d | 2501 | if (!nopartial) { |
b7fe10e5 TH |
2502 | NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; |
2503 | return ptr; | |
2504 | } | |
2505 | ||
2506 | ptr = skb_gro_header_fast(skb, off); | |
2507 | if (skb_gro_header_hard(skb, off + plen)) { | |
2508 | ptr = skb_gro_header_slow(skb, off + plen, off); | |
2509 | if (!ptr) | |
2510 | return NULL; | |
15e2396d TH |
2511 | } |
2512 | ||
b7fe10e5 TH |
2513 | delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, |
2514 | start, offset); | |
dcdc8994 TH |
2515 | |
2516 | /* Adjust skb->csum since we changed the packet */ | |
dcdc8994 | 2517 | NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); |
26c4f7da | 2518 | |
b7fe10e5 | 2519 | grc->offset = off + hdrlen + offset; |
26c4f7da | 2520 | grc->delta = delta; |
b7fe10e5 TH |
2521 | |
2522 | return ptr; | |
dcdc8994 TH |
2523 | } |
2524 | ||
26c4f7da TH |
2525 | static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, |
2526 | struct gro_remcsum *grc) | |
2527 | { | |
b7fe10e5 TH |
2528 | void *ptr; |
2529 | size_t plen = grc->offset + sizeof(u16); | |
2530 | ||
26c4f7da TH |
2531 | if (!grc->delta) |
2532 | return; | |
2533 | ||
b7fe10e5 TH |
2534 | ptr = skb_gro_header_fast(skb, grc->offset); |
2535 | if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { | |
2536 | ptr = skb_gro_header_slow(skb, plen, grc->offset); | |
2537 | if (!ptr) | |
2538 | return; | |
2539 | } | |
2540 | ||
2541 | remcsum_unadjust((__sum16 *)ptr, grc->delta); | |
26c4f7da | 2542 | } |
dcdc8994 | 2543 | |
6ae23ad3 TH |
2544 | struct skb_csum_offl_spec { |
2545 | __u16 ipv4_okay:1, | |
2546 | ipv6_okay:1, | |
2547 | encap_okay:1, | |
2548 | ip_options_okay:1, | |
2549 | ext_hdrs_okay:1, | |
2550 | tcp_okay:1, | |
2551 | udp_okay:1, | |
2552 | sctp_okay:1, | |
2553 | vlan_okay:1, | |
2554 | no_encapped_ipv6:1, | |
2555 | no_not_encapped:1; | |
2556 | }; | |
2557 | ||
2558 | bool __skb_csum_offload_chk(struct sk_buff *skb, | |
2559 | const struct skb_csum_offl_spec *spec, | |
2560 | bool *csum_encapped, | |
2561 | bool csum_help); | |
2562 | ||
2563 | static inline bool skb_csum_offload_chk(struct sk_buff *skb, | |
2564 | const struct skb_csum_offl_spec *spec, | |
2565 | bool *csum_encapped, | |
2566 | bool csum_help) | |
2567 | { | |
2568 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
2569 | return false; | |
2570 | ||
2571 | return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help); | |
2572 | } | |
2573 | ||
2574 | static inline bool skb_csum_offload_chk_help(struct sk_buff *skb, | |
2575 | const struct skb_csum_offl_spec *spec) | |
2576 | { | |
2577 | bool csum_encapped; | |
2578 | ||
2579 | return skb_csum_offload_chk(skb, spec, &csum_encapped, true); | |
2580 | } | |
2581 | ||
2582 | static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb) | |
2583 | { | |
2584 | static const struct skb_csum_offl_spec csum_offl_spec = { | |
2585 | .ipv4_okay = 1, | |
2586 | .ip_options_okay = 1, | |
2587 | .ipv6_okay = 1, | |
2588 | .vlan_okay = 1, | |
2589 | .tcp_okay = 1, | |
2590 | .udp_okay = 1, | |
2591 | }; | |
2592 | ||
2593 | return skb_csum_offload_chk_help(skb, &csum_offl_spec); | |
2594 | } | |
2595 | ||
2596 | static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb) | |
2597 | { | |
2598 | static const struct skb_csum_offl_spec csum_offl_spec = { | |
2599 | .ipv4_okay = 1, | |
2600 | .ip_options_okay = 1, | |
2601 | .tcp_okay = 1, | |
2602 | .udp_okay = 1, | |
2603 | .vlan_okay = 1, | |
2604 | }; | |
2605 | ||
2606 | return skb_csum_offload_chk_help(skb, &csum_offl_spec); | |
2607 | } | |
2608 | ||
0c4e8581 SH |
2609 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
2610 | unsigned short type, | |
3b04ddde | 2611 | const void *daddr, const void *saddr, |
95c96174 | 2612 | unsigned int len) |
0c4e8581 | 2613 | { |
f1ecfd5d | 2614 | if (!dev->header_ops || !dev->header_ops->create) |
0c4e8581 | 2615 | return 0; |
3b04ddde SH |
2616 | |
2617 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | |
0c4e8581 SH |
2618 | } |
2619 | ||
b95cce35 SH |
2620 | static inline int dev_parse_header(const struct sk_buff *skb, |
2621 | unsigned char *haddr) | |
2622 | { | |
2623 | const struct net_device *dev = skb->dev; | |
2624 | ||
1b83336b | 2625 | if (!dev->header_ops || !dev->header_ops->parse) |
b95cce35 | 2626 | return 0; |
3b04ddde | 2627 | return dev->header_ops->parse(skb, haddr); |
b95cce35 SH |
2628 | } |
2629 | ||
1da177e4 | 2630 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); |
f629d208 | 2631 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
1da177e4 LT |
2632 | static inline int unregister_gifconf(unsigned int family) |
2633 | { | |
2634 | return register_gifconf(family, NULL); | |
2635 | } | |
2636 | ||
99bbc707 | 2637 | #ifdef CONFIG_NET_FLOW_LIMIT |
5f121b9a | 2638 | #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ |
99bbc707 WB |
2639 | struct sd_flow_limit { |
2640 | u64 count; | |
2641 | unsigned int num_buckets; | |
2642 | unsigned int history_head; | |
2643 | u16 history[FLOW_LIMIT_HISTORY]; | |
2644 | u8 buckets[]; | |
2645 | }; | |
2646 | ||
2647 | extern int netdev_flow_limit_table_len; | |
2648 | #endif /* CONFIG_NET_FLOW_LIMIT */ | |
2649 | ||
1da177e4 | 2650 | /* |
88751275 | 2651 | * Incoming packets are placed on per-cpu queues |
1da177e4 | 2652 | */ |
d94d9fee | 2653 | struct softnet_data { |
1da177e4 | 2654 | struct list_head poll_list; |
6e7676c1 | 2655 | struct sk_buff_head process_queue; |
1da177e4 | 2656 | |
dee42870 | 2657 | /* stats */ |
cd7b5396 DM |
2658 | unsigned int processed; |
2659 | unsigned int time_squeeze; | |
2660 | unsigned int cpu_collision; | |
2661 | unsigned int received_rps; | |
fd793d89 | 2662 | #ifdef CONFIG_RPS |
88751275 | 2663 | struct softnet_data *rps_ipi_list; |
4cdb1e2e ED |
2664 | #endif |
2665 | #ifdef CONFIG_NET_FLOW_LIMIT | |
2666 | struct sd_flow_limit __rcu *flow_limit; | |
2667 | #endif | |
2668 | struct Qdisc *output_queue; | |
2669 | struct Qdisc **output_queue_tailp; | |
2670 | struct sk_buff *completion_queue; | |
88751275 | 2671 | |
4cdb1e2e | 2672 | #ifdef CONFIG_RPS |
88751275 | 2673 | /* Elements below can be accessed between CPUs for RPS */ |
0a9627f2 | 2674 | struct call_single_data csd ____cacheline_aligned_in_smp; |
88751275 ED |
2675 | struct softnet_data *rps_ipi_next; |
2676 | unsigned int cpu; | |
fec5e652 | 2677 | unsigned int input_queue_head; |
76cc8b13 | 2678 | unsigned int input_queue_tail; |
1e94d72f | 2679 | #endif |
95c96174 | 2680 | unsigned int dropped; |
0a9627f2 | 2681 | struct sk_buff_head input_pkt_queue; |
bea3348e | 2682 | struct napi_struct backlog; |
99bbc707 | 2683 | |
1da177e4 LT |
2684 | }; |
2685 | ||
76cc8b13 | 2686 | static inline void input_queue_head_incr(struct softnet_data *sd) |
fec5e652 TH |
2687 | { |
2688 | #ifdef CONFIG_RPS | |
76cc8b13 TH |
2689 | sd->input_queue_head++; |
2690 | #endif | |
2691 | } | |
2692 | ||
2693 | static inline void input_queue_tail_incr_save(struct softnet_data *sd, | |
2694 | unsigned int *qtail) | |
2695 | { | |
2696 | #ifdef CONFIG_RPS | |
2697 | *qtail = ++sd->input_queue_tail; | |
fec5e652 TH |
2698 | #endif |
2699 | } | |
2700 | ||
0a9627f2 | 2701 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
1da177e4 | 2702 | |
f629d208 | 2703 | void __netif_schedule(struct Qdisc *q); |
46e5da40 | 2704 | void netif_schedule_queue(struct netdev_queue *txq); |
86d804e1 | 2705 | |
fd2ea0a7 DM |
2706 | static inline void netif_tx_schedule_all(struct net_device *dev) |
2707 | { | |
2708 | unsigned int i; | |
2709 | ||
2710 | for (i = 0; i < dev->num_tx_queues; i++) | |
2711 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); | |
2712 | } | |
2713 | ||
d29f749e DJ |
2714 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) |
2715 | { | |
73466498 | 2716 | clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
2717 | } |
2718 | ||
bea3348e SH |
2719 | /** |
2720 | * netif_start_queue - allow transmit | |
2721 | * @dev: network device | |
2722 | * | |
2723 | * Allow upper layers to call the device hard_start_xmit routine. | |
2724 | */ | |
1da177e4 LT |
2725 | static inline void netif_start_queue(struct net_device *dev) |
2726 | { | |
e8a0464c | 2727 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2728 | } |
2729 | ||
fd2ea0a7 DM |
2730 | static inline void netif_tx_start_all_queues(struct net_device *dev) |
2731 | { | |
2732 | unsigned int i; | |
2733 | ||
2734 | for (i = 0; i < dev->num_tx_queues; i++) { | |
2735 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
2736 | netif_tx_start_queue(txq); | |
2737 | } | |
2738 | } | |
2739 | ||
46e5da40 | 2740 | void netif_tx_wake_queue(struct netdev_queue *dev_queue); |
79d16385 | 2741 | |
d29f749e DJ |
2742 | /** |
2743 | * netif_wake_queue - restart transmit | |
2744 | * @dev: network device | |
2745 | * | |
2746 | * Allow upper layers to call the device hard_start_xmit routine. | |
2747 | * Used for flow control when transmit resources are available. | |
2748 | */ | |
79d16385 DM |
2749 | static inline void netif_wake_queue(struct net_device *dev) |
2750 | { | |
e8a0464c | 2751 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2752 | } |
2753 | ||
fd2ea0a7 DM |
2754 | static inline void netif_tx_wake_all_queues(struct net_device *dev) |
2755 | { | |
2756 | unsigned int i; | |
2757 | ||
2758 | for (i = 0; i < dev->num_tx_queues; i++) { | |
2759 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
2760 | netif_tx_wake_queue(txq); | |
2761 | } | |
2762 | } | |
2763 | ||
d29f749e DJ |
2764 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
2765 | { | |
73466498 | 2766 | set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
2767 | } |
2768 | ||
bea3348e SH |
2769 | /** |
2770 | * netif_stop_queue - stop transmitted packets | |
2771 | * @dev: network device | |
2772 | * | |
2773 | * Stop upper layers calling the device hard_start_xmit routine. | |
2774 | * Used for flow control when transmit resources are unavailable. | |
2775 | */ | |
1da177e4 LT |
2776 | static inline void netif_stop_queue(struct net_device *dev) |
2777 | { | |
e8a0464c | 2778 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2779 | } |
2780 | ||
a2029240 | 2781 | void netif_tx_stop_all_queues(struct net_device *dev); |
fd2ea0a7 | 2782 | |
4d29515f | 2783 | static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
d29f749e | 2784 | { |
73466498 | 2785 | return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
2786 | } |
2787 | ||
bea3348e SH |
2788 | /** |
2789 | * netif_queue_stopped - test if transmit queue is flowblocked | |
2790 | * @dev: network device | |
2791 | * | |
2792 | * Test if transmit queue on device is currently unable to send. | |
2793 | */ | |
4d29515f | 2794 | static inline bool netif_queue_stopped(const struct net_device *dev) |
1da177e4 | 2795 | { |
e8a0464c | 2796 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2797 | } |
2798 | ||
4d29515f | 2799 | static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) |
c3f26a26 | 2800 | { |
73466498 TH |
2801 | return dev_queue->state & QUEUE_STATE_ANY_XOFF; |
2802 | } | |
2803 | ||
8e2f1a63 DB |
2804 | static inline bool |
2805 | netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) | |
73466498 TH |
2806 | { |
2807 | return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; | |
2808 | } | |
2809 | ||
8e2f1a63 DB |
2810 | static inline bool |
2811 | netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) | |
2812 | { | |
2813 | return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; | |
2814 | } | |
2815 | ||
53511453 ED |
2816 | /** |
2817 | * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write | |
2818 | * @dev_queue: pointer to transmit queue | |
2819 | * | |
2820 | * BQL enabled drivers might use this helper in their ndo_start_xmit(), | |
2821 | * to give appropriate hint to the cpu. | |
2822 | */ | |
2823 | static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) | |
2824 | { | |
2825 | #ifdef CONFIG_BQL | |
2826 | prefetchw(&dev_queue->dql.num_queued); | |
2827 | #endif | |
2828 | } | |
2829 | ||
2830 | /** | |
2831 | * netdev_txq_bql_complete_prefetchw - prefetch bql data for write | |
2832 | * @dev_queue: pointer to transmit queue | |
2833 | * | |
2834 | * BQL enabled drivers might use this helper in their TX completion path, | |
2835 | * to give appropriate hint to the cpu. | |
2836 | */ | |
2837 | static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) | |
2838 | { | |
2839 | #ifdef CONFIG_BQL | |
2840 | prefetchw(&dev_queue->dql.limit); | |
2841 | #endif | |
2842 | } | |
2843 | ||
c5d67bd7 TH |
2844 | static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, |
2845 | unsigned int bytes) | |
2846 | { | |
114cf580 TH |
2847 | #ifdef CONFIG_BQL |
2848 | dql_queued(&dev_queue->dql, bytes); | |
b37c0fbe AD |
2849 | |
2850 | if (likely(dql_avail(&dev_queue->dql) >= 0)) | |
2851 | return; | |
2852 | ||
2853 | set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | |
2854 | ||
2855 | /* | |
2856 | * The XOFF flag must be set before checking the dql_avail below, | |
2857 | * because in netdev_tx_completed_queue we update the dql_completed | |
2858 | * before checking the XOFF flag. | |
2859 | */ | |
2860 | smp_mb(); | |
2861 | ||
2862 | /* check again in case another CPU has just made room avail */ | |
2863 | if (unlikely(dql_avail(&dev_queue->dql) >= 0)) | |
2864 | clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | |
114cf580 | 2865 | #endif |
c5d67bd7 TH |
2866 | } |
2867 | ||
0042d0c8 FF |
2868 | /** |
2869 | * netdev_sent_queue - report the number of bytes queued to hardware | |
2870 | * @dev: network device | |
2871 | * @bytes: number of bytes queued to the hardware device queue | |
2872 | * | |
2873 | * Report the number of bytes queued for sending/completion to the network | |
2874 | * device hardware queue. @bytes should be a good approximation and should | |
2875 | * exactly match netdev_completed_queue() @bytes | |
2876 | */ | |
c5d67bd7 TH |
2877 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) |
2878 | { | |
2879 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); | |
2880 | } | |
2881 | ||
2882 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, | |
95c96174 | 2883 | unsigned int pkts, unsigned int bytes) |
c5d67bd7 | 2884 | { |
114cf580 | 2885 | #ifdef CONFIG_BQL |
b37c0fbe AD |
2886 | if (unlikely(!bytes)) |
2887 | return; | |
2888 | ||
2889 | dql_completed(&dev_queue->dql, bytes); | |
2890 | ||
2891 | /* | |
2892 | * Without the memory barrier there is a small possiblity that | |
2893 | * netdev_tx_sent_queue will miss the update and cause the queue to | |
2894 | * be stopped forever | |
2895 | */ | |
2896 | smp_mb(); | |
2897 | ||
2898 | if (dql_avail(&dev_queue->dql) < 0) | |
2899 | return; | |
2900 | ||
2901 | if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) | |
2902 | netif_schedule_queue(dev_queue); | |
114cf580 | 2903 | #endif |
c5d67bd7 TH |
2904 | } |
2905 | ||
0042d0c8 FF |
2906 | /** |
2907 | * netdev_completed_queue - report bytes and packets completed by device | |
2908 | * @dev: network device | |
2909 | * @pkts: actual number of packets sent over the medium | |
2910 | * @bytes: actual number of bytes sent over the medium | |
2911 | * | |
2912 | * Report the number of bytes and packets transmitted by the network device | |
2913 | * hardware queue over the physical medium, @bytes must exactly match the | |
2914 | * @bytes amount passed to netdev_sent_queue() | |
2915 | */ | |
c5d67bd7 | 2916 | static inline void netdev_completed_queue(struct net_device *dev, |
95c96174 | 2917 | unsigned int pkts, unsigned int bytes) |
c5d67bd7 TH |
2918 | { |
2919 | netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); | |
2920 | } | |
2921 | ||
2922 | static inline void netdev_tx_reset_queue(struct netdev_queue *q) | |
2923 | { | |
114cf580 | 2924 | #ifdef CONFIG_BQL |
5c490354 | 2925 | clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); |
114cf580 TH |
2926 | dql_reset(&q->dql); |
2927 | #endif | |
c5d67bd7 TH |
2928 | } |
2929 | ||
0042d0c8 FF |
2930 | /** |
2931 | * netdev_reset_queue - reset the packets and bytes count of a network device | |
2932 | * @dev_queue: network device | |
2933 | * | |
2934 | * Reset the bytes and packet count of a network device and clear the | |
2935 | * software flow control OFF bit for this network device | |
2936 | */ | |
c5d67bd7 TH |
2937 | static inline void netdev_reset_queue(struct net_device *dev_queue) |
2938 | { | |
2939 | netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); | |
c3f26a26 DM |
2940 | } |
2941 | ||
b9507bda DB |
2942 | /** |
2943 | * netdev_cap_txqueue - check if selected tx queue exceeds device queues | |
2944 | * @dev: network device | |
2945 | * @queue_index: given tx queue index | |
2946 | * | |
2947 | * Returns 0 if given tx queue index >= number of device tx queues, | |
2948 | * otherwise returns the originally passed tx queue index. | |
2949 | */ | |
2950 | static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) | |
2951 | { | |
2952 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | |
2953 | net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", | |
2954 | dev->name, queue_index, | |
2955 | dev->real_num_tx_queues); | |
2956 | return 0; | |
2957 | } | |
2958 | ||
2959 | return queue_index; | |
2960 | } | |
2961 | ||
bea3348e SH |
2962 | /** |
2963 | * netif_running - test if up | |
2964 | * @dev: network device | |
2965 | * | |
2966 | * Test if the device has been brought up. | |
2967 | */ | |
4d29515f | 2968 | static inline bool netif_running(const struct net_device *dev) |
1da177e4 LT |
2969 | { |
2970 | return test_bit(__LINK_STATE_START, &dev->state); | |
2971 | } | |
2972 | ||
f25f4e44 PWJ |
2973 | /* |
2974 | * Routines to manage the subqueues on a device. We only need start | |
2975 | * stop, and a check if it's stopped. All other device management is | |
2976 | * done at the overall netdevice level. | |
2977 | * Also test the device if we're multiqueue. | |
2978 | */ | |
bea3348e SH |
2979 | |
2980 | /** | |
2981 | * netif_start_subqueue - allow sending packets on subqueue | |
2982 | * @dev: network device | |
2983 | * @queue_index: sub queue index | |
2984 | * | |
2985 | * Start individual transmit queue of a device with multiple transmit queues. | |
2986 | */ | |
f25f4e44 PWJ |
2987 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
2988 | { | |
fd2ea0a7 | 2989 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
2990 | |
2991 | netif_tx_start_queue(txq); | |
f25f4e44 PWJ |
2992 | } |
2993 | ||
bea3348e SH |
2994 | /** |
2995 | * netif_stop_subqueue - stop sending packets on subqueue | |
2996 | * @dev: network device | |
2997 | * @queue_index: sub queue index | |
2998 | * | |
2999 | * Stop individual transmit queue of a device with multiple transmit queues. | |
3000 | */ | |
f25f4e44 PWJ |
3001 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
3002 | { | |
fd2ea0a7 | 3003 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f | 3004 | netif_tx_stop_queue(txq); |
f25f4e44 PWJ |
3005 | } |
3006 | ||
bea3348e SH |
3007 | /** |
3008 | * netif_subqueue_stopped - test status of subqueue | |
3009 | * @dev: network device | |
3010 | * @queue_index: sub queue index | |
3011 | * | |
3012 | * Check individual transmit queue of a device with multiple transmit queues. | |
3013 | */ | |
4d29515f DM |
3014 | static inline bool __netif_subqueue_stopped(const struct net_device *dev, |
3015 | u16 queue_index) | |
f25f4e44 | 3016 | { |
fd2ea0a7 | 3017 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
3018 | |
3019 | return netif_tx_queue_stopped(txq); | |
f25f4e44 PWJ |
3020 | } |
3021 | ||
4d29515f DM |
3022 | static inline bool netif_subqueue_stopped(const struct net_device *dev, |
3023 | struct sk_buff *skb) | |
668f895a PE |
3024 | { |
3025 | return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | |
3026 | } | |
bea3348e | 3027 | |
46e5da40 | 3028 | void netif_wake_subqueue(struct net_device *dev, u16 queue_index); |
f25f4e44 | 3029 | |
537c00de | 3030 | #ifdef CONFIG_XPS |
53af53ae | 3031 | int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, |
f629d208 | 3032 | u16 index); |
537c00de AD |
3033 | #else |
3034 | static inline int netif_set_xps_queue(struct net_device *dev, | |
3573540c | 3035 | const struct cpumask *mask, |
537c00de AD |
3036 | u16 index) |
3037 | { | |
3038 | return 0; | |
3039 | } | |
3040 | #endif | |
3041 | ||
5605c762 JP |
3042 | u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, |
3043 | unsigned int num_tx_queues); | |
3044 | ||
a3d22a68 VZ |
3045 | /* |
3046 | * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used | |
3047 | * as a distribution range limit for the returned value. | |
3048 | */ | |
3049 | static inline u16 skb_tx_hash(const struct net_device *dev, | |
0e001614 | 3050 | struct sk_buff *skb) |
a3d22a68 VZ |
3051 | { |
3052 | return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); | |
3053 | } | |
3054 | ||
bea3348e SH |
3055 | /** |
3056 | * netif_is_multiqueue - test if device has multiple transmit queues | |
3057 | * @dev: network device | |
3058 | * | |
3059 | * Check if device has multiple transmit queues | |
bea3348e | 3060 | */ |
4d29515f | 3061 | static inline bool netif_is_multiqueue(const struct net_device *dev) |
f25f4e44 | 3062 | { |
a02cec21 | 3063 | return dev->num_tx_queues > 1; |
f25f4e44 | 3064 | } |
1da177e4 | 3065 | |
f629d208 | 3066 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); |
f0796d5c | 3067 | |
a953be53 | 3068 | #ifdef CONFIG_SYSFS |
f629d208 | 3069 | int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); |
62fe0b40 BH |
3070 | #else |
3071 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | |
3072 | unsigned int rxq) | |
3073 | { | |
3074 | return 0; | |
3075 | } | |
3076 | #endif | |
3077 | ||
a953be53 MD |
3078 | #ifdef CONFIG_SYSFS |
3079 | static inline unsigned int get_netdev_rx_queue_index( | |
3080 | struct netdev_rx_queue *queue) | |
3081 | { | |
3082 | struct net_device *dev = queue->dev; | |
3083 | int index = queue - dev->_rx; | |
3084 | ||
3085 | BUG_ON(index >= dev->num_rx_queues); | |
3086 | return index; | |
3087 | } | |
3088 | #endif | |
3089 | ||
16917b87 | 3090 | #define DEFAULT_MAX_NUM_RSS_QUEUES (8) |
f629d208 | 3091 | int netif_get_num_default_rss_queues(void); |
16917b87 | 3092 | |
e6247027 ED |
3093 | enum skb_free_reason { |
3094 | SKB_REASON_CONSUMED, | |
3095 | SKB_REASON_DROPPED, | |
3096 | }; | |
3097 | ||
3098 | void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); | |
3099 | void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); | |
1da177e4 | 3100 | |
e6247027 ED |
3101 | /* |
3102 | * It is not allowed to call kfree_skb() or consume_skb() from hardware | |
3103 | * interrupt context or with hardware interrupts being disabled. | |
3104 | * (in_irq() || irqs_disabled()) | |
3105 | * | |
3106 | * We provide four helpers that can be used in following contexts : | |
3107 | * | |
3108 | * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, | |
3109 | * replacing kfree_skb(skb) | |
3110 | * | |
3111 | * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. | |
3112 | * Typically used in place of consume_skb(skb) in TX completion path | |
3113 | * | |
3114 | * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, | |
3115 | * replacing kfree_skb(skb) | |
3116 | * | |
3117 | * dev_consume_skb_any(skb) when caller doesn't know its current irq context, | |
3118 | * and consumed a packet. Used in place of consume_skb(skb) | |
1da177e4 | 3119 | */ |
e6247027 ED |
3120 | static inline void dev_kfree_skb_irq(struct sk_buff *skb) |
3121 | { | |
3122 | __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); | |
3123 | } | |
3124 | ||
3125 | static inline void dev_consume_skb_irq(struct sk_buff *skb) | |
3126 | { | |
3127 | __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); | |
3128 | } | |
3129 | ||
3130 | static inline void dev_kfree_skb_any(struct sk_buff *skb) | |
3131 | { | |
3132 | __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); | |
3133 | } | |
3134 | ||
3135 | static inline void dev_consume_skb_any(struct sk_buff *skb) | |
3136 | { | |
3137 | __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); | |
3138 | } | |
1da177e4 | 3139 | |
f629d208 JP |
3140 | int netif_rx(struct sk_buff *skb); |
3141 | int netif_rx_ni(struct sk_buff *skb); | |
04eb4489 | 3142 | int netif_receive_skb(struct sk_buff *skb); |
f629d208 JP |
3143 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); |
3144 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); | |
3145 | struct sk_buff *napi_get_frags(struct napi_struct *napi); | |
3146 | gro_result_t napi_gro_frags(struct napi_struct *napi); | |
bf5a755f JC |
3147 | struct packet_offload *gro_find_receive_by_type(__be16 type); |
3148 | struct packet_offload *gro_find_complete_by_type(__be16 type); | |
76620aaf HX |
3149 | |
3150 | static inline void napi_free_frags(struct napi_struct *napi) | |
3151 | { | |
3152 | kfree_skb(napi->skb); | |
3153 | napi->skb = NULL; | |
3154 | } | |
3155 | ||
f629d208 JP |
3156 | int netdev_rx_handler_register(struct net_device *dev, |
3157 | rx_handler_func_t *rx_handler, | |
3158 | void *rx_handler_data); | |
3159 | void netdev_rx_handler_unregister(struct net_device *dev); | |
3160 | ||
3161 | bool dev_valid_name(const char *name); | |
3162 | int dev_ioctl(struct net *net, unsigned int cmd, void __user *); | |
3163 | int dev_ethtool(struct net *net, struct ifreq *); | |
3164 | unsigned int dev_get_flags(const struct net_device *); | |
3165 | int __dev_change_flags(struct net_device *, unsigned int flags); | |
3166 | int dev_change_flags(struct net_device *, unsigned int); | |
cb178190 DM |
3167 | void __dev_notify_flags(struct net_device *, unsigned int old_flags, |
3168 | unsigned int gchanges); | |
f629d208 JP |
3169 | int dev_change_name(struct net_device *, const char *); |
3170 | int dev_set_alias(struct net_device *, const char *, size_t); | |
3171 | int dev_change_net_namespace(struct net_device *, struct net *, const char *); | |
3172 | int dev_set_mtu(struct net_device *, int); | |
3173 | void dev_set_group(struct net_device *, int); | |
3174 | int dev_set_mac_address(struct net_device *, struct sockaddr *); | |
3175 | int dev_change_carrier(struct net_device *, bool new_carrier); | |
3176 | int dev_get_phys_port_id(struct net_device *dev, | |
02637fce | 3177 | struct netdev_phys_item_id *ppid); |
db24a904 DA |
3178 | int dev_get_phys_port_name(struct net_device *dev, |
3179 | char *name, size_t len); | |
d746d707 | 3180 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
55a93b3e | 3181 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); |
ce93718f DM |
3182 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
3183 | struct netdev_queue *txq, int *ret); | |
a0265d28 | 3184 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
f629d208 | 3185 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
1ee481fb | 3186 | bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); |
1da177e4 | 3187 | |
20380731 | 3188 | extern int netdev_budget; |
1da177e4 LT |
3189 | |
3190 | /* Called by rtnetlink.c:rtnl_unlock() */ | |
f629d208 | 3191 | void netdev_run_todo(void); |
1da177e4 | 3192 | |
bea3348e SH |
3193 | /** |
3194 | * dev_put - release reference to device | |
3195 | * @dev: network device | |
3196 | * | |
9ef4429b | 3197 | * Release reference to device to allow it to be freed. |
bea3348e | 3198 | */ |
1da177e4 LT |
3199 | static inline void dev_put(struct net_device *dev) |
3200 | { | |
933393f5 | 3201 | this_cpu_dec(*dev->pcpu_refcnt); |
1da177e4 LT |
3202 | } |
3203 | ||
bea3348e SH |
3204 | /** |
3205 | * dev_hold - get reference to device | |
3206 | * @dev: network device | |
3207 | * | |
9ef4429b | 3208 | * Hold reference to device to keep it from being freed. |
bea3348e | 3209 | */ |
15333061 SH |
3210 | static inline void dev_hold(struct net_device *dev) |
3211 | { | |
933393f5 | 3212 | this_cpu_inc(*dev->pcpu_refcnt); |
15333061 | 3213 | } |
1da177e4 LT |
3214 | |
3215 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | |
3216 | * and _off may be called from IRQ context, but it is caller | |
3217 | * who is responsible for serialization of these calls. | |
b00055aa SR |
3218 | * |
3219 | * The name carrier is inappropriate, these functions should really be | |
3220 | * called netif_lowerlayer_*() because they represent the state of any | |
3221 | * kind of lower layer not just hardware media. | |
1da177e4 LT |
3222 | */ |
3223 | ||
f629d208 JP |
3224 | void linkwatch_init_dev(struct net_device *dev); |
3225 | void linkwatch_fire_event(struct net_device *dev); | |
3226 | void linkwatch_forget_dev(struct net_device *dev); | |
1da177e4 | 3227 | |
bea3348e SH |
3228 | /** |
3229 | * netif_carrier_ok - test if carrier present | |
3230 | * @dev: network device | |
3231 | * | |
3232 | * Check if carrier is present on device | |
3233 | */ | |
4d29515f | 3234 | static inline bool netif_carrier_ok(const struct net_device *dev) |
1da177e4 LT |
3235 | { |
3236 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | |
3237 | } | |
3238 | ||
f629d208 | 3239 | unsigned long dev_trans_start(struct net_device *dev); |
9d21493b | 3240 | |
f629d208 | 3241 | void __netdev_watchdog_up(struct net_device *dev); |
1da177e4 | 3242 | |
f629d208 | 3243 | void netif_carrier_on(struct net_device *dev); |
1da177e4 | 3244 | |
f629d208 | 3245 | void netif_carrier_off(struct net_device *dev); |
1da177e4 | 3246 | |
bea3348e SH |
3247 | /** |
3248 | * netif_dormant_on - mark device as dormant. | |
3249 | * @dev: network device | |
3250 | * | |
3251 | * Mark device as dormant (as per RFC2863). | |
3252 | * | |
3253 | * The dormant state indicates that the relevant interface is not | |
3254 | * actually in a condition to pass packets (i.e., it is not 'up') but is | |
3255 | * in a "pending" state, waiting for some external event. For "on- | |
3256 | * demand" interfaces, this new state identifies the situation where the | |
3257 | * interface is waiting for events to place it in the up state. | |
3258 | * | |
3259 | */ | |
b00055aa SR |
3260 | static inline void netif_dormant_on(struct net_device *dev) |
3261 | { | |
3262 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | |
3263 | linkwatch_fire_event(dev); | |
3264 | } | |
3265 | ||
bea3348e SH |
3266 | /** |
3267 | * netif_dormant_off - set device as not dormant. | |
3268 | * @dev: network device | |
3269 | * | |
3270 | * Device is not in dormant state. | |
3271 | */ | |
b00055aa SR |
3272 | static inline void netif_dormant_off(struct net_device *dev) |
3273 | { | |
3274 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | |
3275 | linkwatch_fire_event(dev); | |
3276 | } | |
3277 | ||
bea3348e SH |
3278 | /** |
3279 | * netif_dormant - test if carrier present | |
3280 | * @dev: network device | |
3281 | * | |
3282 | * Check if carrier is present on device | |
3283 | */ | |
4d29515f | 3284 | static inline bool netif_dormant(const struct net_device *dev) |
b00055aa SR |
3285 | { |
3286 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | |
3287 | } | |
3288 | ||
3289 | ||
bea3348e SH |
3290 | /** |
3291 | * netif_oper_up - test if device is operational | |
3292 | * @dev: network device | |
3293 | * | |
3294 | * Check if carrier is operational | |
3295 | */ | |
4d29515f | 3296 | static inline bool netif_oper_up(const struct net_device *dev) |
d94d9fee | 3297 | { |
b00055aa SR |
3298 | return (dev->operstate == IF_OPER_UP || |
3299 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | |
3300 | } | |
3301 | ||
bea3348e SH |
3302 | /** |
3303 | * netif_device_present - is device available or removed | |
3304 | * @dev: network device | |
3305 | * | |
3306 | * Check if device has not been removed from system. | |
3307 | */ | |
4d29515f | 3308 | static inline bool netif_device_present(struct net_device *dev) |
1da177e4 LT |
3309 | { |
3310 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | |
3311 | } | |
3312 | ||
f629d208 | 3313 | void netif_device_detach(struct net_device *dev); |
1da177e4 | 3314 | |
f629d208 | 3315 | void netif_device_attach(struct net_device *dev); |
1da177e4 LT |
3316 | |
3317 | /* | |
3318 | * Network interface message level settings | |
3319 | */ | |
1da177e4 LT |
3320 | |
3321 | enum { | |
3322 | NETIF_MSG_DRV = 0x0001, | |
3323 | NETIF_MSG_PROBE = 0x0002, | |
3324 | NETIF_MSG_LINK = 0x0004, | |
3325 | NETIF_MSG_TIMER = 0x0008, | |
3326 | NETIF_MSG_IFDOWN = 0x0010, | |
3327 | NETIF_MSG_IFUP = 0x0020, | |
3328 | NETIF_MSG_RX_ERR = 0x0040, | |
3329 | NETIF_MSG_TX_ERR = 0x0080, | |
3330 | NETIF_MSG_TX_QUEUED = 0x0100, | |
3331 | NETIF_MSG_INTR = 0x0200, | |
3332 | NETIF_MSG_TX_DONE = 0x0400, | |
3333 | NETIF_MSG_RX_STATUS = 0x0800, | |
3334 | NETIF_MSG_PKTDATA = 0x1000, | |
3335 | NETIF_MSG_HW = 0x2000, | |
3336 | NETIF_MSG_WOL = 0x4000, | |
3337 | }; | |
3338 | ||
3339 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | |
3340 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | |
3341 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | |
3342 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | |
3343 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | |
3344 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | |
3345 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | |
3346 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | |
3347 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | |
3348 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | |
3349 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | |
3350 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | |
3351 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | |
3352 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | |
3353 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | |
3354 | ||
3355 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |
3356 | { | |
3357 | /* use default */ | |
3358 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | |
3359 | return default_msg_enable_bits; | |
3360 | if (debug_value == 0) /* no output */ | |
3361 | return 0; | |
3362 | /* set low N bits */ | |
3363 | return (1 << debug_value) - 1; | |
3364 | } | |
3365 | ||
c773e847 | 3366 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
932ff279 | 3367 | { |
c773e847 DM |
3368 | spin_lock(&txq->_xmit_lock); |
3369 | txq->xmit_lock_owner = cpu; | |
22dd7495 JHS |
3370 | } |
3371 | ||
fd2ea0a7 DM |
3372 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
3373 | { | |
3374 | spin_lock_bh(&txq->_xmit_lock); | |
3375 | txq->xmit_lock_owner = smp_processor_id(); | |
3376 | } | |
3377 | ||
4d29515f | 3378 | static inline bool __netif_tx_trylock(struct netdev_queue *txq) |
c3f26a26 | 3379 | { |
4d29515f | 3380 | bool ok = spin_trylock(&txq->_xmit_lock); |
c3f26a26 DM |
3381 | if (likely(ok)) |
3382 | txq->xmit_lock_owner = smp_processor_id(); | |
3383 | return ok; | |
3384 | } | |
3385 | ||
3386 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | |
3387 | { | |
3388 | txq->xmit_lock_owner = -1; | |
3389 | spin_unlock(&txq->_xmit_lock); | |
3390 | } | |
3391 | ||
3392 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | |
3393 | { | |
3394 | txq->xmit_lock_owner = -1; | |
3395 | spin_unlock_bh(&txq->_xmit_lock); | |
3396 | } | |
3397 | ||
08baf561 ED |
3398 | static inline void txq_trans_update(struct netdev_queue *txq) |
3399 | { | |
3400 | if (txq->xmit_lock_owner != -1) | |
3401 | txq->trans_start = jiffies; | |
3402 | } | |
3403 | ||
d29f749e DJ |
3404 | /** |
3405 | * netif_tx_lock - grab network device transmit lock | |
3406 | * @dev: network device | |
d29f749e DJ |
3407 | * |
3408 | * Get network device transmit lock | |
3409 | */ | |
22dd7495 JHS |
3410 | static inline void netif_tx_lock(struct net_device *dev) |
3411 | { | |
e8a0464c | 3412 | unsigned int i; |
c3f26a26 | 3413 | int cpu; |
c773e847 | 3414 | |
c3f26a26 DM |
3415 | spin_lock(&dev->tx_global_lock); |
3416 | cpu = smp_processor_id(); | |
e8a0464c DM |
3417 | for (i = 0; i < dev->num_tx_queues; i++) { |
3418 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
3419 | |
3420 | /* We are the only thread of execution doing a | |
3421 | * freeze, but we have to grab the _xmit_lock in | |
3422 | * order to synchronize with threads which are in | |
3423 | * the ->hard_start_xmit() handler and already | |
3424 | * checked the frozen bit. | |
3425 | */ | |
e8a0464c | 3426 | __netif_tx_lock(txq, cpu); |
c3f26a26 DM |
3427 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); |
3428 | __netif_tx_unlock(txq); | |
e8a0464c | 3429 | } |
932ff279 HX |
3430 | } |
3431 | ||
3432 | static inline void netif_tx_lock_bh(struct net_device *dev) | |
3433 | { | |
e8a0464c DM |
3434 | local_bh_disable(); |
3435 | netif_tx_lock(dev); | |
932ff279 HX |
3436 | } |
3437 | ||
932ff279 HX |
3438 | static inline void netif_tx_unlock(struct net_device *dev) |
3439 | { | |
e8a0464c DM |
3440 | unsigned int i; |
3441 | ||
3442 | for (i = 0; i < dev->num_tx_queues; i++) { | |
3443 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c773e847 | 3444 | |
c3f26a26 DM |
3445 | /* No need to grab the _xmit_lock here. If the |
3446 | * queue is not stopped for another reason, we | |
3447 | * force a schedule. | |
3448 | */ | |
3449 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | |
7b3d3e4f | 3450 | netif_schedule_queue(txq); |
c3f26a26 DM |
3451 | } |
3452 | spin_unlock(&dev->tx_global_lock); | |
932ff279 HX |
3453 | } |
3454 | ||
3455 | static inline void netif_tx_unlock_bh(struct net_device *dev) | |
3456 | { | |
e8a0464c DM |
3457 | netif_tx_unlock(dev); |
3458 | local_bh_enable(); | |
932ff279 HX |
3459 | } |
3460 | ||
c773e847 | 3461 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
22dd7495 | 3462 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 3463 | __netif_tx_lock(txq, cpu); \ |
22dd7495 JHS |
3464 | } \ |
3465 | } | |
3466 | ||
5efeac44 EB |
3467 | #define HARD_TX_TRYLOCK(dev, txq) \ |
3468 | (((dev->features & NETIF_F_LLTX) == 0) ? \ | |
3469 | __netif_tx_trylock(txq) : \ | |
3470 | true ) | |
3471 | ||
c773e847 | 3472 | #define HARD_TX_UNLOCK(dev, txq) { \ |
22dd7495 | 3473 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 3474 | __netif_tx_unlock(txq); \ |
22dd7495 JHS |
3475 | } \ |
3476 | } | |
3477 | ||
1da177e4 LT |
3478 | static inline void netif_tx_disable(struct net_device *dev) |
3479 | { | |
fd2ea0a7 | 3480 | unsigned int i; |
c3f26a26 | 3481 | int cpu; |
fd2ea0a7 | 3482 | |
c3f26a26 DM |
3483 | local_bh_disable(); |
3484 | cpu = smp_processor_id(); | |
fd2ea0a7 DM |
3485 | for (i = 0; i < dev->num_tx_queues; i++) { |
3486 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
3487 | |
3488 | __netif_tx_lock(txq, cpu); | |
fd2ea0a7 | 3489 | netif_tx_stop_queue(txq); |
c3f26a26 | 3490 | __netif_tx_unlock(txq); |
fd2ea0a7 | 3491 | } |
c3f26a26 | 3492 | local_bh_enable(); |
1da177e4 LT |
3493 | } |
3494 | ||
e308a5d8 DM |
3495 | static inline void netif_addr_lock(struct net_device *dev) |
3496 | { | |
3497 | spin_lock(&dev->addr_list_lock); | |
3498 | } | |
3499 | ||
2429f7ac JP |
3500 | static inline void netif_addr_lock_nested(struct net_device *dev) |
3501 | { | |
25175ba5 VY |
3502 | int subclass = SINGLE_DEPTH_NESTING; |
3503 | ||
3504 | if (dev->netdev_ops->ndo_get_lock_subclass) | |
3505 | subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); | |
3506 | ||
3507 | spin_lock_nested(&dev->addr_list_lock, subclass); | |
2429f7ac JP |
3508 | } |
3509 | ||
e308a5d8 DM |
3510 | static inline void netif_addr_lock_bh(struct net_device *dev) |
3511 | { | |
3512 | spin_lock_bh(&dev->addr_list_lock); | |
3513 | } | |
3514 | ||
3515 | static inline void netif_addr_unlock(struct net_device *dev) | |
3516 | { | |
3517 | spin_unlock(&dev->addr_list_lock); | |
3518 | } | |
3519 | ||
3520 | static inline void netif_addr_unlock_bh(struct net_device *dev) | |
3521 | { | |
3522 | spin_unlock_bh(&dev->addr_list_lock); | |
3523 | } | |
3524 | ||
f001fde5 | 3525 | /* |
31278e71 | 3526 | * dev_addrs walker. Should be used only for read access. Call with |
f001fde5 JP |
3527 | * rcu_read_lock held. |
3528 | */ | |
3529 | #define for_each_dev_addr(dev, ha) \ | |
31278e71 | 3530 | list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) |
f001fde5 | 3531 | |
1da177e4 LT |
3532 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ |
3533 | ||
f629d208 | 3534 | void ether_setup(struct net_device *dev); |
1da177e4 LT |
3535 | |
3536 | /* Support for loadable net-drivers */ | |
f629d208 | 3537 | struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
c835a677 | 3538 | unsigned char name_assign_type, |
f629d208 JP |
3539 | void (*setup)(struct net_device *), |
3540 | unsigned int txqs, unsigned int rxqs); | |
c835a677 TG |
3541 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ |
3542 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) | |
36909ea4 | 3543 | |
c835a677 TG |
3544 | #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ |
3545 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ | |
3546 | count) | |
36909ea4 | 3547 | |
f629d208 JP |
3548 | int register_netdev(struct net_device *dev); |
3549 | void unregister_netdev(struct net_device *dev); | |
f001fde5 | 3550 | |
22bedad3 | 3551 | /* General hardware address lists handling functions */ |
f629d208 JP |
3552 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
3553 | struct netdev_hw_addr_list *from_list, int addr_len); | |
3554 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | |
3555 | struct netdev_hw_addr_list *from_list, int addr_len); | |
670e5b8e AD |
3556 | int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, |
3557 | struct net_device *dev, | |
3558 | int (*sync)(struct net_device *, const unsigned char *), | |
3559 | int (*unsync)(struct net_device *, | |
3560 | const unsigned char *)); | |
3561 | void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, | |
3562 | struct net_device *dev, | |
3563 | int (*unsync)(struct net_device *, | |
3564 | const unsigned char *)); | |
f629d208 | 3565 | void __hw_addr_init(struct netdev_hw_addr_list *list); |
22bedad3 | 3566 | |
f001fde5 | 3567 | /* Functions used for device addresses handling */ |
f629d208 JP |
3568 | int dev_addr_add(struct net_device *dev, const unsigned char *addr, |
3569 | unsigned char addr_type); | |
3570 | int dev_addr_del(struct net_device *dev, const unsigned char *addr, | |
3571 | unsigned char addr_type); | |
f629d208 JP |
3572 | void dev_addr_flush(struct net_device *dev); |
3573 | int dev_addr_init(struct net_device *dev); | |
a748ee24 JP |
3574 | |
3575 | /* Functions used for unicast addresses handling */ | |
f629d208 JP |
3576 | int dev_uc_add(struct net_device *dev, const unsigned char *addr); |
3577 | int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); | |
3578 | int dev_uc_del(struct net_device *dev, const unsigned char *addr); | |
3579 | int dev_uc_sync(struct net_device *to, struct net_device *from); | |
3580 | int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); | |
3581 | void dev_uc_unsync(struct net_device *to, struct net_device *from); | |
3582 | void dev_uc_flush(struct net_device *dev); | |
3583 | void dev_uc_init(struct net_device *dev); | |
f001fde5 | 3584 | |
670e5b8e AD |
3585 | /** |
3586 | * __dev_uc_sync - Synchonize device's unicast list | |
3587 | * @dev: device to sync | |
3588 | * @sync: function to call if address should be added | |
3589 | * @unsync: function to call if address should be removed | |
3590 | * | |
3591 | * Add newly added addresses to the interface, and release | |
3592 | * addresses that have been deleted. | |
3593 | **/ | |
3594 | static inline int __dev_uc_sync(struct net_device *dev, | |
3595 | int (*sync)(struct net_device *, | |
3596 | const unsigned char *), | |
3597 | int (*unsync)(struct net_device *, | |
3598 | const unsigned char *)) | |
3599 | { | |
3600 | return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); | |
3601 | } | |
3602 | ||
3603 | /** | |
e793c0f7 | 3604 | * __dev_uc_unsync - Remove synchronized addresses from device |
670e5b8e AD |
3605 | * @dev: device to sync |
3606 | * @unsync: function to call if address should be removed | |
3607 | * | |
3608 | * Remove all addresses that were added to the device by dev_uc_sync(). | |
3609 | **/ | |
3610 | static inline void __dev_uc_unsync(struct net_device *dev, | |
3611 | int (*unsync)(struct net_device *, | |
3612 | const unsigned char *)) | |
3613 | { | |
3614 | __hw_addr_unsync_dev(&dev->uc, dev, unsync); | |
3615 | } | |
3616 | ||
22bedad3 | 3617 | /* Functions used for multicast addresses handling */ |
f629d208 JP |
3618 | int dev_mc_add(struct net_device *dev, const unsigned char *addr); |
3619 | int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); | |
3620 | int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); | |
3621 | int dev_mc_del(struct net_device *dev, const unsigned char *addr); | |
3622 | int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); | |
3623 | int dev_mc_sync(struct net_device *to, struct net_device *from); | |
3624 | int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); | |
3625 | void dev_mc_unsync(struct net_device *to, struct net_device *from); | |
3626 | void dev_mc_flush(struct net_device *dev); | |
3627 | void dev_mc_init(struct net_device *dev); | |
f001fde5 | 3628 | |
670e5b8e AD |
3629 | /** |
3630 | * __dev_mc_sync - Synchonize device's multicast list | |
3631 | * @dev: device to sync | |
3632 | * @sync: function to call if address should be added | |
3633 | * @unsync: function to call if address should be removed | |
3634 | * | |
3635 | * Add newly added addresses to the interface, and release | |
3636 | * addresses that have been deleted. | |
3637 | **/ | |
3638 | static inline int __dev_mc_sync(struct net_device *dev, | |
3639 | int (*sync)(struct net_device *, | |
3640 | const unsigned char *), | |
3641 | int (*unsync)(struct net_device *, | |
3642 | const unsigned char *)) | |
3643 | { | |
3644 | return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); | |
3645 | } | |
3646 | ||
3647 | /** | |
e793c0f7 | 3648 | * __dev_mc_unsync - Remove synchronized addresses from device |
670e5b8e AD |
3649 | * @dev: device to sync |
3650 | * @unsync: function to call if address should be removed | |
3651 | * | |
3652 | * Remove all addresses that were added to the device by dev_mc_sync(). | |
3653 | **/ | |
3654 | static inline void __dev_mc_unsync(struct net_device *dev, | |
3655 | int (*unsync)(struct net_device *, | |
3656 | const unsigned char *)) | |
3657 | { | |
3658 | __hw_addr_unsync_dev(&dev->mc, dev, unsync); | |
3659 | } | |
3660 | ||
4417da66 | 3661 | /* Functions used for secondary unicast and multicast support */ |
f629d208 JP |
3662 | void dev_set_rx_mode(struct net_device *dev); |
3663 | void __dev_set_rx_mode(struct net_device *dev); | |
3664 | int dev_set_promiscuity(struct net_device *dev, int inc); | |
3665 | int dev_set_allmulti(struct net_device *dev, int inc); | |
3666 | void netdev_state_change(struct net_device *dev); | |
3667 | void netdev_notify_peers(struct net_device *dev); | |
3668 | void netdev_features_change(struct net_device *dev); | |
1da177e4 | 3669 | /* Load a device via the kmod */ |
f629d208 JP |
3670 | void dev_load(struct net *net, const char *name); |
3671 | struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | |
3672 | struct rtnl_link_stats64 *storage); | |
3673 | void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, | |
3674 | const struct net_device_stats *netdev_stats); | |
eeda3fd6 | 3675 | |
1da177e4 | 3676 | extern int netdev_max_backlog; |
3b098e2d | 3677 | extern int netdev_tstamp_prequeue; |
1da177e4 | 3678 | extern int weight_p; |
0a14842f | 3679 | extern int bpf_jit_enable; |
9ff162a8 | 3680 | |
f629d208 | 3681 | bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); |
44a40855 VY |
3682 | struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, |
3683 | struct list_head **iter); | |
f629d208 JP |
3684 | struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, |
3685 | struct list_head **iter); | |
8b5be856 | 3686 | |
44a40855 VY |
3687 | /* iterate through upper list, must be called under RCU read lock */ |
3688 | #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ | |
3689 | for (iter = &(dev)->adj_list.upper, \ | |
3690 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ | |
3691 | updev; \ | |
3692 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) | |
3693 | ||
8b5be856 | 3694 | /* iterate through upper list, must be called under RCU read lock */ |
2f268f12 VF |
3695 | #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ |
3696 | for (iter = &(dev)->all_adj_list.upper, \ | |
3697 | updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ | |
3698 | updev; \ | |
3699 | updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) | |
8b5be856 | 3700 | |
f629d208 JP |
3701 | void *netdev_lower_get_next_private(struct net_device *dev, |
3702 | struct list_head **iter); | |
3703 | void *netdev_lower_get_next_private_rcu(struct net_device *dev, | |
3704 | struct list_head **iter); | |
31088a11 VF |
3705 | |
3706 | #define netdev_for_each_lower_private(dev, priv, iter) \ | |
3707 | for (iter = (dev)->adj_list.lower.next, \ | |
3708 | priv = netdev_lower_get_next_private(dev, &(iter)); \ | |
3709 | priv; \ | |
3710 | priv = netdev_lower_get_next_private(dev, &(iter))) | |
3711 | ||
3712 | #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ | |
3713 | for (iter = &(dev)->adj_list.lower, \ | |
3714 | priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ | |
3715 | priv; \ | |
3716 | priv = netdev_lower_get_next_private_rcu(dev, &(iter))) | |
3717 | ||
4085ebe8 VY |
3718 | void *netdev_lower_get_next(struct net_device *dev, |
3719 | struct list_head **iter); | |
3720 | #define netdev_for_each_lower_dev(dev, ldev, iter) \ | |
3721 | for (iter = &(dev)->adj_list.lower, \ | |
3722 | ldev = netdev_lower_get_next(dev, &(iter)); \ | |
3723 | ldev; \ | |
3724 | ldev = netdev_lower_get_next(dev, &(iter))) | |
3725 | ||
f629d208 | 3726 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
e001bfad | 3727 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
f629d208 JP |
3728 | struct net_device *netdev_master_upper_dev_get(struct net_device *dev); |
3729 | struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); | |
3730 | int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); | |
3731 | int netdev_master_upper_dev_link(struct net_device *dev, | |
6dffb044 | 3732 | struct net_device *upper_dev, |
29bf24af | 3733 | void *upper_priv, void *upper_info); |
f629d208 JP |
3734 | void netdev_upper_dev_unlink(struct net_device *dev, |
3735 | struct net_device *upper_dev); | |
5bb025fa | 3736 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
f629d208 JP |
3737 | void *netdev_lower_dev_get_private(struct net_device *dev, |
3738 | struct net_device *lower_dev); | |
04d48266 JP |
3739 | void netdev_lower_state_changed(struct net_device *lower_dev, |
3740 | void *lower_state_info); | |
960fb622 ED |
3741 | |
3742 | /* RSS keys are 40 or 52 bytes long */ | |
3743 | #define NETDEV_RSS_KEY_LEN 52 | |
3744 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; | |
3745 | void netdev_rss_key_fill(void *buffer, size_t len); | |
3746 | ||
4085ebe8 | 3747 | int dev_get_nest_level(struct net_device *dev, |
b618aaa9 | 3748 | bool (*type_check)(const struct net_device *dev)); |
f629d208 JP |
3749 | int skb_checksum_help(struct sk_buff *skb); |
3750 | struct sk_buff *__skb_gso_segment(struct sk_buff *skb, | |
3751 | netdev_features_t features, bool tx_path); | |
3752 | struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |
3753 | netdev_features_t features); | |
12b0004d | 3754 | |
61bd3857 MS |
3755 | struct netdev_bonding_info { |
3756 | ifslave slave; | |
3757 | ifbond master; | |
3758 | }; | |
3759 | ||
3760 | struct netdev_notifier_bonding_info { | |
3761 | struct netdev_notifier_info info; /* must be first */ | |
3762 | struct netdev_bonding_info bonding_info; | |
3763 | }; | |
3764 | ||
3765 | void netdev_bonding_info_change(struct net_device *dev, | |
3766 | struct netdev_bonding_info *bonding_info); | |
3767 | ||
12b0004d CW |
3768 | static inline |
3769 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) | |
3770 | { | |
3771 | return __skb_gso_segment(skb, features, true); | |
3772 | } | |
53d6471c | 3773 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth); |
ec5f0615 PS |
3774 | |
3775 | static inline bool can_checksum_protocol(netdev_features_t features, | |
3776 | __be16 protocol) | |
3777 | { | |
c8cd0989 TH |
3778 | if (protocol == htons(ETH_P_FCOE)) |
3779 | return !!(features & NETIF_F_FCOE_CRC); | |
3780 | ||
3781 | /* Assume this is an IP checksum (not SCTP CRC) */ | |
3782 | ||
3783 | if (features & NETIF_F_HW_CSUM) { | |
3784 | /* Can checksum everything */ | |
3785 | return true; | |
3786 | } | |
3787 | ||
3788 | switch (protocol) { | |
3789 | case htons(ETH_P_IP): | |
3790 | return !!(features & NETIF_F_IP_CSUM); | |
3791 | case htons(ETH_P_IPV6): | |
3792 | return !!(features & NETIF_F_IPV6_CSUM); | |
3793 | default: | |
3794 | return false; | |
3795 | } | |
ec5f0615 | 3796 | } |
12b0004d | 3797 | |
6ae23ad3 TH |
3798 | /* Map an ethertype into IP protocol if possible */ |
3799 | static inline int eproto_to_ipproto(int eproto) | |
3800 | { | |
3801 | switch (eproto) { | |
3802 | case htons(ETH_P_IP): | |
3803 | return IPPROTO_IP; | |
3804 | case htons(ETH_P_IPV6): | |
3805 | return IPPROTO_IPV6; | |
3806 | default: | |
3807 | return -1; | |
3808 | } | |
3809 | } | |
3810 | ||
fb286bb2 | 3811 | #ifdef CONFIG_BUG |
f629d208 | 3812 | void netdev_rx_csum_fault(struct net_device *dev); |
fb286bb2 HX |
3813 | #else |
3814 | static inline void netdev_rx_csum_fault(struct net_device *dev) | |
3815 | { | |
3816 | } | |
3817 | #endif | |
1da177e4 | 3818 | /* rx skb timestamps */ |
f629d208 JP |
3819 | void net_enable_timestamp(void); |
3820 | void net_disable_timestamp(void); | |
1da177e4 | 3821 | |
20380731 | 3822 | #ifdef CONFIG_PROC_FS |
f629d208 | 3823 | int __init dev_proc_init(void); |
900ff8c6 CW |
3824 | #else |
3825 | #define dev_proc_init() 0 | |
20380731 ACM |
3826 | #endif |
3827 | ||
4798248e | 3828 | static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, |
fa2dbdc2 DM |
3829 | struct sk_buff *skb, struct net_device *dev, |
3830 | bool more) | |
4798248e | 3831 | { |
fa2dbdc2 | 3832 | skb->xmit_more = more ? 1 : 0; |
0b725a2c | 3833 | return ops->ndo_start_xmit(skb, dev); |
4798248e DM |
3834 | } |
3835 | ||
10b3ad8c | 3836 | static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, |
fa2dbdc2 | 3837 | struct netdev_queue *txq, bool more) |
4798248e DM |
3838 | { |
3839 | const struct net_device_ops *ops = dev->netdev_ops; | |
10b3ad8c | 3840 | int rc; |
4798248e | 3841 | |
fa2dbdc2 | 3842 | rc = __netdev_start_xmit(ops, skb, dev, more); |
10b3ad8c DM |
3843 | if (rc == NETDEV_TX_OK) |
3844 | txq_trans_update(txq); | |
3845 | ||
3846 | return rc; | |
4798248e DM |
3847 | } |
3848 | ||
42a2d923 LT |
3849 | int netdev_class_create_file_ns(struct class_attribute *class_attr, |
3850 | const void *ns); | |
3851 | void netdev_class_remove_file_ns(struct class_attribute *class_attr, | |
3852 | const void *ns); | |
58292cbe TH |
3853 | |
3854 | static inline int netdev_class_create_file(struct class_attribute *class_attr) | |
3855 | { | |
3856 | return netdev_class_create_file_ns(class_attr, NULL); | |
3857 | } | |
3858 | ||
3859 | static inline void netdev_class_remove_file(struct class_attribute *class_attr) | |
3860 | { | |
3861 | netdev_class_remove_file_ns(class_attr, NULL); | |
3862 | } | |
b8a9787e | 3863 | |
04600794 JB |
3864 | extern struct kobj_ns_type_operations net_ns_type_operations; |
3865 | ||
f629d208 | 3866 | const char *netdev_drivername(const struct net_device *dev); |
6579e57b | 3867 | |
f629d208 | 3868 | void linkwatch_run_queue(void); |
20380731 | 3869 | |
da08143b MK |
3870 | static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, |
3871 | netdev_features_t f2) | |
3872 | { | |
c8cd0989 TH |
3873 | if ((f1 ^ f2) & NETIF_F_HW_CSUM) { |
3874 | if (f1 & NETIF_F_HW_CSUM) | |
b6a0e72a | 3875 | f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
c8cd0989 | 3876 | else |
b6a0e72a | 3877 | f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
c8cd0989 | 3878 | } |
da08143b | 3879 | |
c8cd0989 | 3880 | return f1 & f2; |
da08143b MK |
3881 | } |
3882 | ||
c8f44aff MM |
3883 | static inline netdev_features_t netdev_get_wanted_features( |
3884 | struct net_device *dev) | |
5455c699 MM |
3885 | { |
3886 | return (dev->features & ~dev->hw_features) | dev->wanted_features; | |
3887 | } | |
c8f44aff MM |
3888 | netdev_features_t netdev_increment_features(netdev_features_t all, |
3889 | netdev_features_t one, netdev_features_t mask); | |
b0ce3508 ED |
3890 | |
3891 | /* Allow TSO being used on stacked device : | |
3892 | * Performing the GSO segmentation before last device | |
3893 | * is a performance improvement. | |
3894 | */ | |
3895 | static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, | |
3896 | netdev_features_t mask) | |
3897 | { | |
3898 | return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); | |
3899 | } | |
3900 | ||
6cb6a27c | 3901 | int __netdev_update_features(struct net_device *dev); |
5455c699 | 3902 | void netdev_update_features(struct net_device *dev); |
afe12cc8 | 3903 | void netdev_change_features(struct net_device *dev); |
7f353bf2 | 3904 | |
fc4a7489 PM |
3905 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
3906 | struct net_device *dev); | |
3907 | ||
e38f3025 TM |
3908 | netdev_features_t passthru_features_check(struct sk_buff *skb, |
3909 | struct net_device *dev, | |
3910 | netdev_features_t features); | |
c1e756bf | 3911 | netdev_features_t netif_skb_features(struct sk_buff *skb); |
58e998c6 | 3912 | |
4d29515f | 3913 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) |
576a30eb | 3914 | { |
c8f44aff | 3915 | netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; |
0345e186 MM |
3916 | |
3917 | /* check flags correspondence */ | |
3918 | BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); | |
3919 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); | |
3920 | BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); | |
3921 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); | |
3922 | BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); | |
3923 | BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); | |
4b28252c TH |
3924 | BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); |
3925 | BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); | |
3926 | BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT)); | |
3927 | BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); | |
3928 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); | |
3929 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); | |
e585f236 | 3930 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); |
0345e186 | 3931 | |
d6b4991a | 3932 | return (features & feature) == feature; |
576a30eb HX |
3933 | } |
3934 | ||
4d29515f | 3935 | static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) |
bcd76111 | 3936 | { |
278b2513 | 3937 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
21dc3301 | 3938 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
bcd76111 HX |
3939 | } |
3940 | ||
8b86a61d | 3941 | static inline bool netif_needs_gso(struct sk_buff *skb, |
4d29515f | 3942 | netdev_features_t features) |
7967168c | 3943 | { |
fc741216 | 3944 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || |
cdbee74c YZ |
3945 | unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && |
3946 | (skb->ip_summed != CHECKSUM_UNNECESSARY))); | |
7967168c HX |
3947 | } |
3948 | ||
82cc1a7a PWJ |
3949 | static inline void netif_set_gso_max_size(struct net_device *dev, |
3950 | unsigned int size) | |
3951 | { | |
3952 | dev->gso_max_size = size; | |
3953 | } | |
3954 | ||
7a7ffbab WCC |
3955 | static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, |
3956 | int pulled_hlen, u16 mac_offset, | |
3957 | int mac_len) | |
3958 | { | |
3959 | skb->protocol = protocol; | |
3960 | skb->encapsulation = 1; | |
3961 | skb_push(skb, pulled_hlen); | |
3962 | skb_reset_transport_header(skb); | |
3963 | skb->mac_header = mac_offset; | |
3964 | skb->network_header = skb->mac_header + mac_len; | |
3965 | skb->mac_len = mac_len; | |
3966 | } | |
3967 | ||
b618aaa9 | 3968 | static inline bool netif_is_macvlan(const struct net_device *dev) |
a6cc0cfa JF |
3969 | { |
3970 | return dev->priv_flags & IFF_MACVLAN; | |
3971 | } | |
3972 | ||
b618aaa9 | 3973 | static inline bool netif_is_macvlan_port(const struct net_device *dev) |
2f33e7d5 MB |
3974 | { |
3975 | return dev->priv_flags & IFF_MACVLAN_PORT; | |
3976 | } | |
3977 | ||
b618aaa9 | 3978 | static inline bool netif_is_ipvlan(const struct net_device *dev) |
5933fea7 MB |
3979 | { |
3980 | return dev->priv_flags & IFF_IPVLAN_SLAVE; | |
3981 | } | |
3982 | ||
b618aaa9 | 3983 | static inline bool netif_is_ipvlan_port(const struct net_device *dev) |
5933fea7 MB |
3984 | { |
3985 | return dev->priv_flags & IFF_IPVLAN_MASTER; | |
3986 | } | |
3987 | ||
b618aaa9 | 3988 | static inline bool netif_is_bond_master(const struct net_device *dev) |
8a7fbfab | 3989 | { |
3990 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; | |
3991 | } | |
3992 | ||
b618aaa9 | 3993 | static inline bool netif_is_bond_slave(const struct net_device *dev) |
1765a575 JP |
3994 | { |
3995 | return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; | |
3996 | } | |
3997 | ||
3bdc0eba BG |
3998 | static inline bool netif_supports_nofcs(struct net_device *dev) |
3999 | { | |
4000 | return dev->priv_flags & IFF_SUPP_NOFCS; | |
4001 | } | |
4002 | ||
007979ea | 4003 | static inline bool netif_is_l3_master(const struct net_device *dev) |
4e3c8992 | 4004 | { |
007979ea | 4005 | return dev->priv_flags & IFF_L3MDEV_MASTER; |
4e3c8992 DA |
4006 | } |
4007 | ||
fee6d4c7 DA |
4008 | static inline bool netif_is_l3_slave(const struct net_device *dev) |
4009 | { | |
4010 | return dev->priv_flags & IFF_L3MDEV_SLAVE; | |
4011 | } | |
4012 | ||
0894ae3f JP |
4013 | static inline bool netif_is_bridge_master(const struct net_device *dev) |
4014 | { | |
4015 | return dev->priv_flags & IFF_EBRIDGE; | |
4016 | } | |
4017 | ||
28f9ee22 VY |
4018 | static inline bool netif_is_bridge_port(const struct net_device *dev) |
4019 | { | |
4020 | return dev->priv_flags & IFF_BRIDGE_PORT; | |
4021 | } | |
4022 | ||
35d4e172 JP |
4023 | static inline bool netif_is_ovs_master(const struct net_device *dev) |
4024 | { | |
4025 | return dev->priv_flags & IFF_OPENVSWITCH; | |
4026 | } | |
4027 | ||
b618aaa9 | 4028 | static inline bool netif_is_team_master(const struct net_device *dev) |
c981e421 JP |
4029 | { |
4030 | return dev->priv_flags & IFF_TEAM; | |
4031 | } | |
4032 | ||
b618aaa9 | 4033 | static inline bool netif_is_team_port(const struct net_device *dev) |
f7f019ee JP |
4034 | { |
4035 | return dev->priv_flags & IFF_TEAM_PORT; | |
4036 | } | |
4037 | ||
b618aaa9 | 4038 | static inline bool netif_is_lag_master(const struct net_device *dev) |
7be61833 JP |
4039 | { |
4040 | return netif_is_bond_master(dev) || netif_is_team_master(dev); | |
4041 | } | |
4042 | ||
b618aaa9 | 4043 | static inline bool netif_is_lag_port(const struct net_device *dev) |
e0ba1414 JP |
4044 | { |
4045 | return netif_is_bond_slave(dev) || netif_is_team_port(dev); | |
4046 | } | |
4047 | ||
02875878 ED |
4048 | /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ |
4049 | static inline void netif_keep_dst(struct net_device *dev) | |
4050 | { | |
4051 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); | |
4052 | } | |
4053 | ||
505d4f73 | 4054 | extern struct pernet_operations __net_initdata loopback_net_ops; |
b1b67dd4 | 4055 | |
571ba423 JP |
4056 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ |
4057 | ||
4058 | /* netdev_printk helpers, similar to dev_printk */ | |
4059 | ||
4060 | static inline const char *netdev_name(const struct net_device *dev) | |
4061 | { | |
c6f854d5 VF |
4062 | if (!dev->name[0] || strchr(dev->name, '%')) |
4063 | return "(unnamed net_device)"; | |
571ba423 JP |
4064 | return dev->name; |
4065 | } | |
4066 | ||
ccc7f496 VF |
4067 | static inline const char *netdev_reg_state(const struct net_device *dev) |
4068 | { | |
4069 | switch (dev->reg_state) { | |
4070 | case NETREG_UNINITIALIZED: return " (uninitialized)"; | |
4071 | case NETREG_REGISTERED: return ""; | |
4072 | case NETREG_UNREGISTERING: return " (unregistering)"; | |
4073 | case NETREG_UNREGISTERED: return " (unregistered)"; | |
4074 | case NETREG_RELEASED: return " (released)"; | |
4075 | case NETREG_DUMMY: return " (dummy)"; | |
4076 | } | |
4077 | ||
4078 | WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); | |
4079 | return " (unknown)"; | |
4080 | } | |
4081 | ||
f629d208 | 4082 | __printf(3, 4) |
6ea754eb JP |
4083 | void netdev_printk(const char *level, const struct net_device *dev, |
4084 | const char *format, ...); | |
f629d208 | 4085 | __printf(2, 3) |
6ea754eb | 4086 | void netdev_emerg(const struct net_device *dev, const char *format, ...); |
f629d208 | 4087 | __printf(2, 3) |
6ea754eb | 4088 | void netdev_alert(const struct net_device *dev, const char *format, ...); |
f629d208 | 4089 | __printf(2, 3) |
6ea754eb | 4090 | void netdev_crit(const struct net_device *dev, const char *format, ...); |
f629d208 | 4091 | __printf(2, 3) |
6ea754eb | 4092 | void netdev_err(const struct net_device *dev, const char *format, ...); |
f629d208 | 4093 | __printf(2, 3) |
6ea754eb | 4094 | void netdev_warn(const struct net_device *dev, const char *format, ...); |
f629d208 | 4095 | __printf(2, 3) |
6ea754eb | 4096 | void netdev_notice(const struct net_device *dev, const char *format, ...); |
f629d208 | 4097 | __printf(2, 3) |
6ea754eb | 4098 | void netdev_info(const struct net_device *dev, const char *format, ...); |
571ba423 | 4099 | |
8909c9ad VK |
4100 | #define MODULE_ALIAS_NETDEV(device) \ |
4101 | MODULE_ALIAS("netdev-" device) | |
4102 | ||
b558c96f | 4103 | #if defined(CONFIG_DYNAMIC_DEBUG) |
571ba423 JP |
4104 | #define netdev_dbg(__dev, format, args...) \ |
4105 | do { \ | |
ffa10cb4 | 4106 | dynamic_netdev_dbg(__dev, format, ##args); \ |
571ba423 | 4107 | } while (0) |
b558c96f JC |
4108 | #elif defined(DEBUG) |
4109 | #define netdev_dbg(__dev, format, args...) \ | |
4110 | netdev_printk(KERN_DEBUG, __dev, format, ##args) | |
571ba423 JP |
4111 | #else |
4112 | #define netdev_dbg(__dev, format, args...) \ | |
4113 | ({ \ | |
4114 | if (0) \ | |
4115 | netdev_printk(KERN_DEBUG, __dev, format, ##args); \ | |
571ba423 JP |
4116 | }) |
4117 | #endif | |
4118 | ||
4119 | #if defined(VERBOSE_DEBUG) | |
4120 | #define netdev_vdbg netdev_dbg | |
4121 | #else | |
4122 | ||
4123 | #define netdev_vdbg(dev, format, args...) \ | |
4124 | ({ \ | |
4125 | if (0) \ | |
4126 | netdev_printk(KERN_DEBUG, dev, format, ##args); \ | |
4127 | 0; \ | |
4128 | }) | |
4129 | #endif | |
4130 | ||
4131 | /* | |
4132 | * netdev_WARN() acts like dev_printk(), but with the key difference | |
4133 | * of using a WARN/WARN_ON to get the message out, including the | |
4134 | * file/line information and a backtrace. | |
4135 | */ | |
4136 | #define netdev_WARN(dev, format, args...) \ | |
ccc7f496 VF |
4137 | WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ |
4138 | netdev_reg_state(dev), ##args) | |
571ba423 | 4139 | |
b3d95c5c JP |
4140 | /* netif printk helpers, similar to netdev_printk */ |
4141 | ||
4142 | #define netif_printk(priv, type, level, dev, fmt, args...) \ | |
4143 | do { \ | |
4144 | if (netif_msg_##type(priv)) \ | |
4145 | netdev_printk(level, (dev), fmt, ##args); \ | |
4146 | } while (0) | |
4147 | ||
f45f4321 JP |
4148 | #define netif_level(level, priv, type, dev, fmt, args...) \ |
4149 | do { \ | |
4150 | if (netif_msg_##type(priv)) \ | |
4151 | netdev_##level(dev, fmt, ##args); \ | |
4152 | } while (0) | |
4153 | ||
b3d95c5c | 4154 | #define netif_emerg(priv, type, dev, fmt, args...) \ |
f45f4321 | 4155 | netif_level(emerg, priv, type, dev, fmt, ##args) |
b3d95c5c | 4156 | #define netif_alert(priv, type, dev, fmt, args...) \ |
f45f4321 | 4157 | netif_level(alert, priv, type, dev, fmt, ##args) |
b3d95c5c | 4158 | #define netif_crit(priv, type, dev, fmt, args...) \ |
f45f4321 | 4159 | netif_level(crit, priv, type, dev, fmt, ##args) |
b3d95c5c | 4160 | #define netif_err(priv, type, dev, fmt, args...) \ |
f45f4321 | 4161 | netif_level(err, priv, type, dev, fmt, ##args) |
b3d95c5c | 4162 | #define netif_warn(priv, type, dev, fmt, args...) \ |
f45f4321 | 4163 | netif_level(warn, priv, type, dev, fmt, ##args) |
b3d95c5c | 4164 | #define netif_notice(priv, type, dev, fmt, args...) \ |
f45f4321 | 4165 | netif_level(notice, priv, type, dev, fmt, ##args) |
b3d95c5c | 4166 | #define netif_info(priv, type, dev, fmt, args...) \ |
f45f4321 | 4167 | netif_level(info, priv, type, dev, fmt, ##args) |
b3d95c5c | 4168 | |
0053ea9c | 4169 | #if defined(CONFIG_DYNAMIC_DEBUG) |
b3d95c5c JP |
4170 | #define netif_dbg(priv, type, netdev, format, args...) \ |
4171 | do { \ | |
4172 | if (netif_msg_##type(priv)) \ | |
b5fb0a03 | 4173 | dynamic_netdev_dbg(netdev, format, ##args); \ |
b3d95c5c | 4174 | } while (0) |
0053ea9c JP |
4175 | #elif defined(DEBUG) |
4176 | #define netif_dbg(priv, type, dev, format, args...) \ | |
4177 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) | |
b3d95c5c JP |
4178 | #else |
4179 | #define netif_dbg(priv, type, dev, format, args...) \ | |
4180 | ({ \ | |
4181 | if (0) \ | |
4182 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | |
4183 | 0; \ | |
4184 | }) | |
4185 | #endif | |
4186 | ||
4187 | #if defined(VERBOSE_DEBUG) | |
bcfcc450 | 4188 | #define netif_vdbg netif_dbg |
b3d95c5c JP |
4189 | #else |
4190 | #define netif_vdbg(priv, type, dev, format, args...) \ | |
4191 | ({ \ | |
4192 | if (0) \ | |
a4ed89cb | 4193 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ |
b3d95c5c JP |
4194 | 0; \ |
4195 | }) | |
4196 | #endif | |
571ba423 | 4197 | |
900ff8c6 CW |
4198 | /* |
4199 | * The list of packet types we will receive (as opposed to discard) | |
4200 | * and the routines to invoke. | |
4201 | * | |
4202 | * Why 16. Because with 16 the only overlap we get on a hash of the | |
4203 | * low nibble of the protocol value is RARP/SNAP/X.25. | |
4204 | * | |
4205 | * NOTE: That is no longer true with the addition of VLAN tags. Not | |
4206 | * sure which should go first, but I bet it won't make much | |
4207 | * difference if we are running VLANs. The good news is that | |
4208 | * this protocol won't be in the list unless compiled in, so | |
4209 | * the average user (w/out VLANs) will not be adversely affected. | |
4210 | * --BLG | |
4211 | * | |
4212 | * 0800 IP | |
4213 | * 8100 802.1Q VLAN | |
4214 | * 0001 802.3 | |
4215 | * 0002 AX.25 | |
4216 | * 0004 802.2 | |
4217 | * 8035 RARP | |
4218 | * 0005 SNAP | |
4219 | * 0805 X.25 | |
4220 | * 0806 ARP | |
4221 | * 8137 IPX | |
4222 | * 0009 Localtalk | |
4223 | * 86DD IPv6 | |
4224 | */ | |
4225 | #define PTYPE_HASH_SIZE (16) | |
4226 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) | |
4227 | ||
385a154c | 4228 | #endif /* _LINUX_NETDEVICE_H */ |