]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - include/linux/netdevice.h
Merge tag 'for-linus-4.9-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
[mirror_ubuntu-zesty-kernel.git] / include / linux / netdevice.h
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the Interfaces handler.
7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 * Moved to /usr/include/linux for NET3
24 */
25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H
27
28 #include <linux/timer.h>
29 #include <linux/bug.h>
30 #include <linux/delay.h>
31 #include <linux/atomic.h>
32 #include <linux/prefetch.h>
33 #include <asm/cache.h>
34 #include <asm/byteorder.h>
35
36 #include <linux/percpu.h>
37 #include <linux/rculist.h>
38 #include <linux/dmaengine.h>
39 #include <linux/workqueue.h>
40 #include <linux/dynamic_queue_limits.h>
41
42 #include <linux/ethtool.h>
43 #include <net/net_namespace.h>
44 #include <net/dsa.h>
45 #ifdef CONFIG_DCB
46 #include <net/dcbnl.h>
47 #endif
48 #include <net/netprio_cgroup.h>
49
50 #include <linux/netdev_features.h>
51 #include <linux/neighbour.h>
52 #include <uapi/linux/netdevice.h>
53 #include <uapi/linux/if_bonding.h>
54 #include <uapi/linux/pkt_cls.h>
55 #include <linux/hashtable.h>
56
57 struct netpoll_info;
58 struct device;
59 struct phy_device;
60 /* 802.11 specific */
61 struct wireless_dev;
62 /* 802.15.4 specific */
63 struct wpan_dev;
64 struct mpls_dev;
65 /* UDP Tunnel offloads */
66 struct udp_tunnel_info;
67 struct bpf_prog;
68
69 void netdev_set_default_ethtool_ops(struct net_device *dev,
70 const struct ethtool_ops *ops);
71
72 /* Backlog congestion levels */
73 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
74 #define NET_RX_DROP 1 /* packet dropped */
75
76 /*
77 * Transmit return codes: transmit return codes originate from three different
78 * namespaces:
79 *
80 * - qdisc return codes
81 * - driver transmit return codes
82 * - errno values
83 *
84 * Drivers are allowed to return any one of those in their hard_start_xmit()
85 * function. Real network devices commonly used with qdiscs should only return
86 * the driver transmit return codes though - when qdiscs are used, the actual
87 * transmission happens asynchronously, so the value is not propagated to
88 * higher layers. Virtual network devices transmit synchronously; in this case
89 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
90 * others are propagated to higher layers.
91 */
92
93 /* qdisc ->enqueue() return codes. */
94 #define NET_XMIT_SUCCESS 0x00
95 #define NET_XMIT_DROP 0x01 /* skb dropped */
96 #define NET_XMIT_CN 0x02 /* congestion notification */
97 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
98
99 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
100 * indicates that the device will soon be dropping packets, or already drops
101 * some packets of the same priority; prompting us to send less aggressively. */
102 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
103 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
104
105 /* Driver transmit return codes */
106 #define NETDEV_TX_MASK 0xf0
107
108 enum netdev_tx {
109 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
110 NETDEV_TX_OK = 0x00, /* driver took care of packet */
111 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
112 };
113 typedef enum netdev_tx netdev_tx_t;
114
115 /*
116 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
117 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
118 */
119 static inline bool dev_xmit_complete(int rc)
120 {
121 /*
122 * Positive cases with an skb consumed by a driver:
123 * - successful transmission (rc == NETDEV_TX_OK)
124 * - error while transmitting (rc < 0)
125 * - error while queueing to a different device (rc & NET_XMIT_MASK)
126 */
127 if (likely(rc < NET_XMIT_MASK))
128 return true;
129
130 return false;
131 }
132
133 /*
134 * Compute the worst-case header length according to the protocols
135 * used.
136 */
137
138 #if defined(CONFIG_HYPERV_NET)
139 # define LL_MAX_HEADER 128
140 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
141 # if defined(CONFIG_MAC80211_MESH)
142 # define LL_MAX_HEADER 128
143 # else
144 # define LL_MAX_HEADER 96
145 # endif
146 #else
147 # define LL_MAX_HEADER 32
148 #endif
149
150 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
151 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
152 #define MAX_HEADER LL_MAX_HEADER
153 #else
154 #define MAX_HEADER (LL_MAX_HEADER + 48)
155 #endif
156
157 /*
158 * Old network device statistics. Fields are native words
159 * (unsigned long) so they can be read and written atomically.
160 */
161
162 struct net_device_stats {
163 unsigned long rx_packets;
164 unsigned long tx_packets;
165 unsigned long rx_bytes;
166 unsigned long tx_bytes;
167 unsigned long rx_errors;
168 unsigned long tx_errors;
169 unsigned long rx_dropped;
170 unsigned long tx_dropped;
171 unsigned long multicast;
172 unsigned long collisions;
173 unsigned long rx_length_errors;
174 unsigned long rx_over_errors;
175 unsigned long rx_crc_errors;
176 unsigned long rx_frame_errors;
177 unsigned long rx_fifo_errors;
178 unsigned long rx_missed_errors;
179 unsigned long tx_aborted_errors;
180 unsigned long tx_carrier_errors;
181 unsigned long tx_fifo_errors;
182 unsigned long tx_heartbeat_errors;
183 unsigned long tx_window_errors;
184 unsigned long rx_compressed;
185 unsigned long tx_compressed;
186 };
187
188
189 #include <linux/cache.h>
190 #include <linux/skbuff.h>
191
192 #ifdef CONFIG_RPS
193 #include <linux/static_key.h>
194 extern struct static_key rps_needed;
195 #endif
196
197 struct neighbour;
198 struct neigh_parms;
199 struct sk_buff;
200
201 struct netdev_hw_addr {
202 struct list_head list;
203 unsigned char addr[MAX_ADDR_LEN];
204 unsigned char type;
205 #define NETDEV_HW_ADDR_T_LAN 1
206 #define NETDEV_HW_ADDR_T_SAN 2
207 #define NETDEV_HW_ADDR_T_SLAVE 3
208 #define NETDEV_HW_ADDR_T_UNICAST 4
209 #define NETDEV_HW_ADDR_T_MULTICAST 5
210 bool global_use;
211 int sync_cnt;
212 int refcount;
213 int synced;
214 struct rcu_head rcu_head;
215 };
216
217 struct netdev_hw_addr_list {
218 struct list_head list;
219 int count;
220 };
221
222 #define netdev_hw_addr_list_count(l) ((l)->count)
223 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
224 #define netdev_hw_addr_list_for_each(ha, l) \
225 list_for_each_entry(ha, &(l)->list, list)
226
227 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
228 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
229 #define netdev_for_each_uc_addr(ha, dev) \
230 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
231
232 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
233 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
234 #define netdev_for_each_mc_addr(ha, dev) \
235 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
236
237 struct hh_cache {
238 u16 hh_len;
239 u16 __pad;
240 seqlock_t hh_lock;
241
242 /* cached hardware header; allow for machine alignment needs. */
243 #define HH_DATA_MOD 16
244 #define HH_DATA_OFF(__len) \
245 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
246 #define HH_DATA_ALIGN(__len) \
247 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
248 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
249 };
250
251 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
252 * Alternative is:
253 * dev->hard_header_len ? (dev->hard_header_len +
254 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
255 *
256 * We could use other alignment values, but we must maintain the
257 * relationship HH alignment <= LL alignment.
258 */
259 #define LL_RESERVED_SPACE(dev) \
260 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
261 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
262 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
263
264 struct header_ops {
265 int (*create) (struct sk_buff *skb, struct net_device *dev,
266 unsigned short type, const void *daddr,
267 const void *saddr, unsigned int len);
268 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
269 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
270 void (*cache_update)(struct hh_cache *hh,
271 const struct net_device *dev,
272 const unsigned char *haddr);
273 bool (*validate)(const char *ll_header, unsigned int len);
274 };
275
276 /* These flag bits are private to the generic network queueing
277 * layer; they may not be explicitly referenced by any other
278 * code.
279 */
280
281 enum netdev_state_t {
282 __LINK_STATE_START,
283 __LINK_STATE_PRESENT,
284 __LINK_STATE_NOCARRIER,
285 __LINK_STATE_LINKWATCH_PENDING,
286 __LINK_STATE_DORMANT,
287 };
288
289
290 /*
291 * This structure holds boot-time configured netdevice settings. They
292 * are then used in the device probing.
293 */
294 struct netdev_boot_setup {
295 char name[IFNAMSIZ];
296 struct ifmap map;
297 };
298 #define NETDEV_BOOT_SETUP_MAX 8
299
300 int __init netdev_boot_setup(char *str);
301
302 /*
303 * Structure for NAPI scheduling similar to tasklet but with weighting
304 */
305 struct napi_struct {
306 /* The poll_list must only be managed by the entity which
307 * changes the state of the NAPI_STATE_SCHED bit. This means
308 * whoever atomically sets that bit can add this napi_struct
309 * to the per-CPU poll_list, and whoever clears that bit
310 * can remove from the list right before clearing the bit.
311 */
312 struct list_head poll_list;
313
314 unsigned long state;
315 int weight;
316 unsigned int gro_count;
317 int (*poll)(struct napi_struct *, int);
318 #ifdef CONFIG_NETPOLL
319 spinlock_t poll_lock;
320 int poll_owner;
321 #endif
322 struct net_device *dev;
323 struct sk_buff *gro_list;
324 struct sk_buff *skb;
325 struct hrtimer timer;
326 struct list_head dev_list;
327 struct hlist_node napi_hash_node;
328 unsigned int napi_id;
329 };
330
331 enum {
332 NAPI_STATE_SCHED, /* Poll is scheduled */
333 NAPI_STATE_DISABLE, /* Disable pending */
334 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
335 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
336 NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
337 };
338
339 enum gro_result {
340 GRO_MERGED,
341 GRO_MERGED_FREE,
342 GRO_HELD,
343 GRO_NORMAL,
344 GRO_DROP,
345 };
346 typedef enum gro_result gro_result_t;
347
348 /*
349 * enum rx_handler_result - Possible return values for rx_handlers.
350 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
351 * further.
352 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
353 * case skb->dev was changed by rx_handler.
354 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
355 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
356 *
357 * rx_handlers are functions called from inside __netif_receive_skb(), to do
358 * special processing of the skb, prior to delivery to protocol handlers.
359 *
360 * Currently, a net_device can only have a single rx_handler registered. Trying
361 * to register a second rx_handler will return -EBUSY.
362 *
363 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
364 * To unregister a rx_handler on a net_device, use
365 * netdev_rx_handler_unregister().
366 *
367 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
368 * do with the skb.
369 *
370 * If the rx_handler consumed the skb in some way, it should return
371 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
372 * the skb to be delivered in some other way.
373 *
374 * If the rx_handler changed skb->dev, to divert the skb to another
375 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
376 * new device will be called if it exists.
377 *
378 * If the rx_handler decides the skb should be ignored, it should return
379 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
380 * are registered on exact device (ptype->dev == skb->dev).
381 *
382 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
383 * delivered, it should return RX_HANDLER_PASS.
384 *
385 * A device without a registered rx_handler will behave as if rx_handler
386 * returned RX_HANDLER_PASS.
387 */
388
389 enum rx_handler_result {
390 RX_HANDLER_CONSUMED,
391 RX_HANDLER_ANOTHER,
392 RX_HANDLER_EXACT,
393 RX_HANDLER_PASS,
394 };
395 typedef enum rx_handler_result rx_handler_result_t;
396 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
397
398 void __napi_schedule(struct napi_struct *n);
399 void __napi_schedule_irqoff(struct napi_struct *n);
400
401 static inline bool napi_disable_pending(struct napi_struct *n)
402 {
403 return test_bit(NAPI_STATE_DISABLE, &n->state);
404 }
405
406 /**
407 * napi_schedule_prep - check if NAPI can be scheduled
408 * @n: NAPI context
409 *
410 * Test if NAPI routine is already running, and if not mark
411 * it as running. This is used as a condition variable to
412 * insure only one NAPI poll instance runs. We also make
413 * sure there is no pending NAPI disable.
414 */
415 static inline bool napi_schedule_prep(struct napi_struct *n)
416 {
417 return !napi_disable_pending(n) &&
418 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
419 }
420
421 /**
422 * napi_schedule - schedule NAPI poll
423 * @n: NAPI context
424 *
425 * Schedule NAPI poll routine to be called if it is not already
426 * running.
427 */
428 static inline void napi_schedule(struct napi_struct *n)
429 {
430 if (napi_schedule_prep(n))
431 __napi_schedule(n);
432 }
433
434 /**
435 * napi_schedule_irqoff - schedule NAPI poll
436 * @n: NAPI context
437 *
438 * Variant of napi_schedule(), assuming hard irqs are masked.
439 */
440 static inline void napi_schedule_irqoff(struct napi_struct *n)
441 {
442 if (napi_schedule_prep(n))
443 __napi_schedule_irqoff(n);
444 }
445
446 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
447 static inline bool napi_reschedule(struct napi_struct *napi)
448 {
449 if (napi_schedule_prep(napi)) {
450 __napi_schedule(napi);
451 return true;
452 }
453 return false;
454 }
455
456 void __napi_complete(struct napi_struct *n);
457 void napi_complete_done(struct napi_struct *n, int work_done);
458 /**
459 * napi_complete - NAPI processing complete
460 * @n: NAPI context
461 *
462 * Mark NAPI processing as complete.
463 * Consider using napi_complete_done() instead.
464 */
465 static inline void napi_complete(struct napi_struct *n)
466 {
467 return napi_complete_done(n, 0);
468 }
469
470 /**
471 * napi_hash_add - add a NAPI to global hashtable
472 * @napi: NAPI context
473 *
474 * Generate a new napi_id and store a @napi under it in napi_hash.
475 * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
476 * Note: This is normally automatically done from netif_napi_add(),
477 * so might disappear in a future Linux version.
478 */
479 void napi_hash_add(struct napi_struct *napi);
480
481 /**
482 * napi_hash_del - remove a NAPI from global table
483 * @napi: NAPI context
484 *
485 * Warning: caller must observe RCU grace period
486 * before freeing memory containing @napi, if
487 * this function returns true.
488 * Note: core networking stack automatically calls it
489 * from netif_napi_del().
490 * Drivers might want to call this helper to combine all
491 * the needed RCU grace periods into a single one.
492 */
493 bool napi_hash_del(struct napi_struct *napi);
494
495 /**
496 * napi_disable - prevent NAPI from scheduling
497 * @n: NAPI context
498 *
499 * Stop NAPI from being scheduled on this context.
500 * Waits till any outstanding processing completes.
501 */
502 void napi_disable(struct napi_struct *n);
503
504 /**
505 * napi_enable - enable NAPI scheduling
506 * @n: NAPI context
507 *
508 * Resume NAPI from being scheduled on this context.
509 * Must be paired with napi_disable.
510 */
511 static inline void napi_enable(struct napi_struct *n)
512 {
513 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
514 smp_mb__before_atomic();
515 clear_bit(NAPI_STATE_SCHED, &n->state);
516 clear_bit(NAPI_STATE_NPSVC, &n->state);
517 }
518
519 /**
520 * napi_synchronize - wait until NAPI is not running
521 * @n: NAPI context
522 *
523 * Wait until NAPI is done being scheduled on this context.
524 * Waits till any outstanding processing completes but
525 * does not disable future activations.
526 */
527 static inline void napi_synchronize(const struct napi_struct *n)
528 {
529 if (IS_ENABLED(CONFIG_SMP))
530 while (test_bit(NAPI_STATE_SCHED, &n->state))
531 msleep(1);
532 else
533 barrier();
534 }
535
536 enum netdev_queue_state_t {
537 __QUEUE_STATE_DRV_XOFF,
538 __QUEUE_STATE_STACK_XOFF,
539 __QUEUE_STATE_FROZEN,
540 };
541
542 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
543 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
544 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
545
546 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
547 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
548 QUEUE_STATE_FROZEN)
549 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
550 QUEUE_STATE_FROZEN)
551
552 /*
553 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
554 * netif_tx_* functions below are used to manipulate this flag. The
555 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
556 * queue independently. The netif_xmit_*stopped functions below are called
557 * to check if the queue has been stopped by the driver or stack (either
558 * of the XOFF bits are set in the state). Drivers should not need to call
559 * netif_xmit*stopped functions, they should only be using netif_tx_*.
560 */
561
562 struct netdev_queue {
563 /*
564 * read-mostly part
565 */
566 struct net_device *dev;
567 struct Qdisc __rcu *qdisc;
568 struct Qdisc *qdisc_sleeping;
569 #ifdef CONFIG_SYSFS
570 struct kobject kobj;
571 #endif
572 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
573 int numa_node;
574 #endif
575 unsigned long tx_maxrate;
576 /*
577 * Number of TX timeouts for this queue
578 * (/sys/class/net/DEV/Q/trans_timeout)
579 */
580 unsigned long trans_timeout;
581 /*
582 * write-mostly part
583 */
584 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
585 int xmit_lock_owner;
586 /*
587 * Time (in jiffies) of last Tx
588 */
589 unsigned long trans_start;
590
591 unsigned long state;
592
593 #ifdef CONFIG_BQL
594 struct dql dql;
595 #endif
596 } ____cacheline_aligned_in_smp;
597
598 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
599 {
600 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
601 return q->numa_node;
602 #else
603 return NUMA_NO_NODE;
604 #endif
605 }
606
607 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
608 {
609 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
610 q->numa_node = node;
611 #endif
612 }
613
614 #ifdef CONFIG_RPS
615 /*
616 * This structure holds an RPS map which can be of variable length. The
617 * map is an array of CPUs.
618 */
619 struct rps_map {
620 unsigned int len;
621 struct rcu_head rcu;
622 u16 cpus[0];
623 };
624 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
625
626 /*
627 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
628 * tail pointer for that CPU's input queue at the time of last enqueue, and
629 * a hardware filter index.
630 */
631 struct rps_dev_flow {
632 u16 cpu;
633 u16 filter;
634 unsigned int last_qtail;
635 };
636 #define RPS_NO_FILTER 0xffff
637
638 /*
639 * The rps_dev_flow_table structure contains a table of flow mappings.
640 */
641 struct rps_dev_flow_table {
642 unsigned int mask;
643 struct rcu_head rcu;
644 struct rps_dev_flow flows[0];
645 };
646 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
647 ((_num) * sizeof(struct rps_dev_flow)))
648
649 /*
650 * The rps_sock_flow_table contains mappings of flows to the last CPU
651 * on which they were processed by the application (set in recvmsg).
652 * Each entry is a 32bit value. Upper part is the high-order bits
653 * of flow hash, lower part is CPU number.
654 * rps_cpu_mask is used to partition the space, depending on number of
655 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
656 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
657 * meaning we use 32-6=26 bits for the hash.
658 */
659 struct rps_sock_flow_table {
660 u32 mask;
661
662 u32 ents[0] ____cacheline_aligned_in_smp;
663 };
664 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
665
666 #define RPS_NO_CPU 0xffff
667
668 extern u32 rps_cpu_mask;
669 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
670
671 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
672 u32 hash)
673 {
674 if (table && hash) {
675 unsigned int index = hash & table->mask;
676 u32 val = hash & ~rps_cpu_mask;
677
678 /* We only give a hint, preemption can change CPU under us */
679 val |= raw_smp_processor_id();
680
681 if (table->ents[index] != val)
682 table->ents[index] = val;
683 }
684 }
685
686 #ifdef CONFIG_RFS_ACCEL
687 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
688 u16 filter_id);
689 #endif
690 #endif /* CONFIG_RPS */
691
692 /* This structure contains an instance of an RX queue. */
693 struct netdev_rx_queue {
694 #ifdef CONFIG_RPS
695 struct rps_map __rcu *rps_map;
696 struct rps_dev_flow_table __rcu *rps_flow_table;
697 #endif
698 struct kobject kobj;
699 struct net_device *dev;
700 } ____cacheline_aligned_in_smp;
701
702 /*
703 * RX queue sysfs structures and functions.
704 */
705 struct rx_queue_attribute {
706 struct attribute attr;
707 ssize_t (*show)(struct netdev_rx_queue *queue,
708 struct rx_queue_attribute *attr, char *buf);
709 ssize_t (*store)(struct netdev_rx_queue *queue,
710 struct rx_queue_attribute *attr, const char *buf, size_t len);
711 };
712
713 #ifdef CONFIG_XPS
714 /*
715 * This structure holds an XPS map which can be of variable length. The
716 * map is an array of queues.
717 */
718 struct xps_map {
719 unsigned int len;
720 unsigned int alloc_len;
721 struct rcu_head rcu;
722 u16 queues[0];
723 };
724 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
725 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
726 - sizeof(struct xps_map)) / sizeof(u16))
727
728 /*
729 * This structure holds all XPS maps for device. Maps are indexed by CPU.
730 */
731 struct xps_dev_maps {
732 struct rcu_head rcu;
733 struct xps_map __rcu *cpu_map[0];
734 };
735 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
736 (nr_cpu_ids * sizeof(struct xps_map *)))
737 #endif /* CONFIG_XPS */
738
739 #define TC_MAX_QUEUE 16
740 #define TC_BITMASK 15
741 /* HW offloaded queuing disciplines txq count and offset maps */
742 struct netdev_tc_txq {
743 u16 count;
744 u16 offset;
745 };
746
747 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
748 /*
749 * This structure is to hold information about the device
750 * configured to run FCoE protocol stack.
751 */
752 struct netdev_fcoe_hbainfo {
753 char manufacturer[64];
754 char serial_number[64];
755 char hardware_version[64];
756 char driver_version[64];
757 char optionrom_version[64];
758 char firmware_version[64];
759 char model[256];
760 char model_description[256];
761 };
762 #endif
763
764 #define MAX_PHYS_ITEM_ID_LEN 32
765
766 /* This structure holds a unique identifier to identify some
767 * physical item (port for example) used by a netdevice.
768 */
769 struct netdev_phys_item_id {
770 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
771 unsigned char id_len;
772 };
773
774 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
775 struct netdev_phys_item_id *b)
776 {
777 return a->id_len == b->id_len &&
778 memcmp(a->id, b->id, a->id_len) == 0;
779 }
780
781 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
782 struct sk_buff *skb);
783
784 /* These structures hold the attributes of qdisc and classifiers
785 * that are being passed to the netdevice through the setup_tc op.
786 */
787 enum {
788 TC_SETUP_MQPRIO,
789 TC_SETUP_CLSU32,
790 TC_SETUP_CLSFLOWER,
791 TC_SETUP_MATCHALL,
792 TC_SETUP_CLSBPF,
793 };
794
795 struct tc_cls_u32_offload;
796
797 struct tc_to_netdev {
798 unsigned int type;
799 union {
800 u8 tc;
801 struct tc_cls_u32_offload *cls_u32;
802 struct tc_cls_flower_offload *cls_flower;
803 struct tc_cls_matchall_offload *cls_mall;
804 struct tc_cls_bpf_offload *cls_bpf;
805 };
806 };
807
808 /* These structures hold the attributes of xdp state that are being passed
809 * to the netdevice through the xdp op.
810 */
811 enum xdp_netdev_command {
812 /* Set or clear a bpf program used in the earliest stages of packet
813 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
814 * is responsible for calling bpf_prog_put on any old progs that are
815 * stored. In case of error, the callee need not release the new prog
816 * reference, but on success it takes ownership and must bpf_prog_put
817 * when it is no longer used.
818 */
819 XDP_SETUP_PROG,
820 /* Check if a bpf program is set on the device. The callee should
821 * return true if a program is currently attached and running.
822 */
823 XDP_QUERY_PROG,
824 };
825
826 struct netdev_xdp {
827 enum xdp_netdev_command command;
828 union {
829 /* XDP_SETUP_PROG */
830 struct bpf_prog *prog;
831 /* XDP_QUERY_PROG */
832 bool prog_attached;
833 };
834 };
835
836 /*
837 * This structure defines the management hooks for network devices.
838 * The following hooks can be defined; unless noted otherwise, they are
839 * optional and can be filled with a null pointer.
840 *
841 * int (*ndo_init)(struct net_device *dev);
842 * This function is called once when a network device is registered.
843 * The network device can use this for any late stage initialization
844 * or semantic validation. It can fail with an error code which will
845 * be propagated back to register_netdev.
846 *
847 * void (*ndo_uninit)(struct net_device *dev);
848 * This function is called when device is unregistered or when registration
849 * fails. It is not called if init fails.
850 *
851 * int (*ndo_open)(struct net_device *dev);
852 * This function is called when a network device transitions to the up
853 * state.
854 *
855 * int (*ndo_stop)(struct net_device *dev);
856 * This function is called when a network device transitions to the down
857 * state.
858 *
859 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
860 * struct net_device *dev);
861 * Called when a packet needs to be transmitted.
862 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
863 * the queue before that can happen; it's for obsolete devices and weird
864 * corner cases, but the stack really does a non-trivial amount
865 * of useless work if you return NETDEV_TX_BUSY.
866 * Required; cannot be NULL.
867 *
868 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
869 * netdev_features_t features);
870 * Adjusts the requested feature flags according to device-specific
871 * constraints, and returns the resulting flags. Must not modify
872 * the device state.
873 *
874 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
875 * void *accel_priv, select_queue_fallback_t fallback);
876 * Called to decide which queue to use when device supports multiple
877 * transmit queues.
878 *
879 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
880 * This function is called to allow device receiver to make
881 * changes to configuration when multicast or promiscuous is enabled.
882 *
883 * void (*ndo_set_rx_mode)(struct net_device *dev);
884 * This function is called device changes address list filtering.
885 * If driver handles unicast address filtering, it should set
886 * IFF_UNICAST_FLT in its priv_flags.
887 *
888 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
889 * This function is called when the Media Access Control address
890 * needs to be changed. If this interface is not defined, the
891 * MAC address can not be changed.
892 *
893 * int (*ndo_validate_addr)(struct net_device *dev);
894 * Test if Media Access Control address is valid for the device.
895 *
896 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
897 * Called when a user requests an ioctl which can't be handled by
898 * the generic interface code. If not defined ioctls return
899 * not supported error code.
900 *
901 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
902 * Used to set network devices bus interface parameters. This interface
903 * is retained for legacy reasons; new devices should use the bus
904 * interface (PCI) for low level management.
905 *
906 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
907 * Called when a user wants to change the Maximum Transfer Unit
908 * of a device. If not defined, any request to change MTU will
909 * will return an error.
910 *
911 * void (*ndo_tx_timeout)(struct net_device *dev);
912 * Callback used when the transmitter has not made any progress
913 * for dev->watchdog ticks.
914 *
915 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
916 * struct rtnl_link_stats64 *storage);
917 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
918 * Called when a user wants to get the network device usage
919 * statistics. Drivers must do one of the following:
920 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
921 * rtnl_link_stats64 structure passed by the caller.
922 * 2. Define @ndo_get_stats to update a net_device_stats structure
923 * (which should normally be dev->stats) and return a pointer to
924 * it. The structure may be changed asynchronously only if each
925 * field is written atomically.
926 * 3. Update dev->stats asynchronously and atomically, and define
927 * neither operation.
928 *
929 * bool (*ndo_has_offload_stats)(int attr_id)
930 * Return true if this device supports offload stats of this attr_id.
931 *
932 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
933 * void *attr_data)
934 * Get statistics for offload operations by attr_id. Write it into the
935 * attr_data pointer.
936 *
937 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
938 * If device supports VLAN filtering this function is called when a
939 * VLAN id is registered.
940 *
941 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
942 * If device supports VLAN filtering this function is called when a
943 * VLAN id is unregistered.
944 *
945 * void (*ndo_poll_controller)(struct net_device *dev);
946 *
947 * SR-IOV management functions.
948 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
949 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
950 * u8 qos, __be16 proto);
951 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
952 * int max_tx_rate);
953 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
954 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
955 * int (*ndo_get_vf_config)(struct net_device *dev,
956 * int vf, struct ifla_vf_info *ivf);
957 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
958 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
959 * struct nlattr *port[]);
960 *
961 * Enable or disable the VF ability to query its RSS Redirection Table and
962 * Hash Key. This is needed since on some devices VF share this information
963 * with PF and querying it may introduce a theoretical security risk.
964 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
965 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
966 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
967 * Called to setup 'tc' number of traffic classes in the net device. This
968 * is always called from the stack with the rtnl lock held and netif tx
969 * queues stopped. This allows the netdevice to perform queue management
970 * safely.
971 *
972 * Fiber Channel over Ethernet (FCoE) offload functions.
973 * int (*ndo_fcoe_enable)(struct net_device *dev);
974 * Called when the FCoE protocol stack wants to start using LLD for FCoE
975 * so the underlying device can perform whatever needed configuration or
976 * initialization to support acceleration of FCoE traffic.
977 *
978 * int (*ndo_fcoe_disable)(struct net_device *dev);
979 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
980 * so the underlying device can perform whatever needed clean-ups to
981 * stop supporting acceleration of FCoE traffic.
982 *
983 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
984 * struct scatterlist *sgl, unsigned int sgc);
985 * Called when the FCoE Initiator wants to initialize an I/O that
986 * is a possible candidate for Direct Data Placement (DDP). The LLD can
987 * perform necessary setup and returns 1 to indicate the device is set up
988 * successfully to perform DDP on this I/O, otherwise this returns 0.
989 *
990 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
991 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
992 * indicated by the FC exchange id 'xid', so the underlying device can
993 * clean up and reuse resources for later DDP requests.
994 *
995 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
996 * struct scatterlist *sgl, unsigned int sgc);
997 * Called when the FCoE Target wants to initialize an I/O that
998 * is a possible candidate for Direct Data Placement (DDP). The LLD can
999 * perform necessary setup and returns 1 to indicate the device is set up
1000 * successfully to perform DDP on this I/O, otherwise this returns 0.
1001 *
1002 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1003 * struct netdev_fcoe_hbainfo *hbainfo);
1004 * Called when the FCoE Protocol stack wants information on the underlying
1005 * device. This information is utilized by the FCoE protocol stack to
1006 * register attributes with Fiber Channel management service as per the
1007 * FC-GS Fabric Device Management Information(FDMI) specification.
1008 *
1009 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
1010 * Called when the underlying device wants to override default World Wide
1011 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1012 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1013 * protocol stack to use.
1014 *
1015 * RFS acceleration.
1016 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1017 * u16 rxq_index, u32 flow_id);
1018 * Set hardware filter for RFS. rxq_index is the target queue index;
1019 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1020 * Return the filter ID on success, or a negative error code.
1021 *
1022 * Slave management functions (for bridge, bonding, etc).
1023 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1024 * Called to make another netdev an underling.
1025 *
1026 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1027 * Called to release previously enslaved netdev.
1028 *
1029 * Feature/offload setting functions.
1030 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1031 * Called to update device configuration to new features. Passed
1032 * feature set might be less than what was returned by ndo_fix_features()).
1033 * Must return >0 or -errno if it changed dev->features itself.
1034 *
1035 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1036 * struct net_device *dev,
1037 * const unsigned char *addr, u16 vid, u16 flags)
1038 * Adds an FDB entry to dev for addr.
1039 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1040 * struct net_device *dev,
1041 * const unsigned char *addr, u16 vid)
1042 * Deletes the FDB entry from dev coresponding to addr.
1043 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1044 * struct net_device *dev, struct net_device *filter_dev,
1045 * int *idx)
1046 * Used to add FDB entries to dump requests. Implementers should add
1047 * entries to skb and update idx with the number of entries.
1048 *
1049 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1050 * u16 flags)
1051 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1052 * struct net_device *dev, u32 filter_mask,
1053 * int nlflags)
1054 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1055 * u16 flags);
1056 *
1057 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1058 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1059 * which do not represent real hardware may define this to allow their
1060 * userspace components to manage their virtual carrier state. Devices
1061 * that determine carrier state from physical hardware properties (eg
1062 * network cables) or protocol-dependent mechanisms (eg
1063 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
1064 *
1065 * int (*ndo_get_phys_port_id)(struct net_device *dev,
1066 * struct netdev_phys_item_id *ppid);
1067 * Called to get ID of physical port of this device. If driver does
1068 * not implement this, it is assumed that the hw is not able to have
1069 * multiple net devices on single physical port.
1070 *
1071 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
1072 * struct udp_tunnel_info *ti);
1073 * Called by UDP tunnel to notify a driver about the UDP port and socket
1074 * address family that a UDP tunnel is listnening to. It is called only
1075 * when a new port starts listening. The operation is protected by the
1076 * RTNL.
1077 *
1078 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
1079 * struct udp_tunnel_info *ti);
1080 * Called by UDP tunnel to notify the driver about a UDP port and socket
1081 * address family that the UDP tunnel is not listening to anymore. The
1082 * operation is protected by the RTNL.
1083 *
1084 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1085 * struct net_device *dev)
1086 * Called by upper layer devices to accelerate switching or other
1087 * station functionality into hardware. 'pdev is the lowerdev
1088 * to use for the offload and 'dev' is the net device that will
1089 * back the offload. Returns a pointer to the private structure
1090 * the upper layer will maintain.
1091 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1092 * Called by upper layer device to delete the station created
1093 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1094 * the station and priv is the structure returned by the add
1095 * operation.
1096 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
1097 * struct net_device *dev,
1098 * void *priv);
1099 * Callback to use for xmit over the accelerated station. This
1100 * is used in place of ndo_start_xmit on accelerated net
1101 * devices.
1102 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1103 * struct net_device *dev
1104 * netdev_features_t features);
1105 * Called by core transmit path to determine if device is capable of
1106 * performing offload operations on a given packet. This is to give
1107 * the device an opportunity to implement any restrictions that cannot
1108 * be otherwise expressed by feature flags. The check is called with
1109 * the set of features that the stack has calculated and it returns
1110 * those the driver believes to be appropriate.
1111 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1112 * int queue_index, u32 maxrate);
1113 * Called when a user wants to set a max-rate limitation of specific
1114 * TX queue.
1115 * int (*ndo_get_iflink)(const struct net_device *dev);
1116 * Called to get the iflink value of this device.
1117 * void (*ndo_change_proto_down)(struct net_device *dev,
1118 * bool proto_down);
1119 * This function is used to pass protocol port error state information
1120 * to the switch driver. The switch driver can react to the proto_down
1121 * by doing a phys down on the associated switch port.
1122 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1123 * This function is used to get egress tunnel information for given skb.
1124 * This is useful for retrieving outer tunnel header parameters while
1125 * sampling packet.
1126 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1127 * This function is used to specify the headroom that the skb must
1128 * consider when allocation skb during packet reception. Setting
1129 * appropriate rx headroom value allows avoiding skb head copy on
1130 * forward. Setting a negative value resets the rx headroom to the
1131 * default value.
1132 * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
1133 * This function is used to set or query state related to XDP on the
1134 * netdevice. See definition of enum xdp_netdev_command for details.
1135 *
1136 */
1137 struct net_device_ops {
1138 int (*ndo_init)(struct net_device *dev);
1139 void (*ndo_uninit)(struct net_device *dev);
1140 int (*ndo_open)(struct net_device *dev);
1141 int (*ndo_stop)(struct net_device *dev);
1142 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1143 struct net_device *dev);
1144 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1145 struct net_device *dev,
1146 netdev_features_t features);
1147 u16 (*ndo_select_queue)(struct net_device *dev,
1148 struct sk_buff *skb,
1149 void *accel_priv,
1150 select_queue_fallback_t fallback);
1151 void (*ndo_change_rx_flags)(struct net_device *dev,
1152 int flags);
1153 void (*ndo_set_rx_mode)(struct net_device *dev);
1154 int (*ndo_set_mac_address)(struct net_device *dev,
1155 void *addr);
1156 int (*ndo_validate_addr)(struct net_device *dev);
1157 int (*ndo_do_ioctl)(struct net_device *dev,
1158 struct ifreq *ifr, int cmd);
1159 int (*ndo_set_config)(struct net_device *dev,
1160 struct ifmap *map);
1161 int (*ndo_change_mtu)(struct net_device *dev,
1162 int new_mtu);
1163 int (*ndo_neigh_setup)(struct net_device *dev,
1164 struct neigh_parms *);
1165 void (*ndo_tx_timeout) (struct net_device *dev);
1166
1167 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
1168 struct rtnl_link_stats64 *storage);
1169 bool (*ndo_has_offload_stats)(int attr_id);
1170 int (*ndo_get_offload_stats)(int attr_id,
1171 const struct net_device *dev,
1172 void *attr_data);
1173 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1174
1175 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1176 __be16 proto, u16 vid);
1177 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1178 __be16 proto, u16 vid);
1179 #ifdef CONFIG_NET_POLL_CONTROLLER
1180 void (*ndo_poll_controller)(struct net_device *dev);
1181 int (*ndo_netpoll_setup)(struct net_device *dev,
1182 struct netpoll_info *info);
1183 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1184 #endif
1185 #ifdef CONFIG_NET_RX_BUSY_POLL
1186 int (*ndo_busy_poll)(struct napi_struct *dev);
1187 #endif
1188 int (*ndo_set_vf_mac)(struct net_device *dev,
1189 int queue, u8 *mac);
1190 int (*ndo_set_vf_vlan)(struct net_device *dev,
1191 int queue, u16 vlan,
1192 u8 qos, __be16 proto);
1193 int (*ndo_set_vf_rate)(struct net_device *dev,
1194 int vf, int min_tx_rate,
1195 int max_tx_rate);
1196 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1197 int vf, bool setting);
1198 int (*ndo_set_vf_trust)(struct net_device *dev,
1199 int vf, bool setting);
1200 int (*ndo_get_vf_config)(struct net_device *dev,
1201 int vf,
1202 struct ifla_vf_info *ivf);
1203 int (*ndo_set_vf_link_state)(struct net_device *dev,
1204 int vf, int link_state);
1205 int (*ndo_get_vf_stats)(struct net_device *dev,
1206 int vf,
1207 struct ifla_vf_stats
1208 *vf_stats);
1209 int (*ndo_set_vf_port)(struct net_device *dev,
1210 int vf,
1211 struct nlattr *port[]);
1212 int (*ndo_get_vf_port)(struct net_device *dev,
1213 int vf, struct sk_buff *skb);
1214 int (*ndo_set_vf_guid)(struct net_device *dev,
1215 int vf, u64 guid,
1216 int guid_type);
1217 int (*ndo_set_vf_rss_query_en)(
1218 struct net_device *dev,
1219 int vf, bool setting);
1220 int (*ndo_setup_tc)(struct net_device *dev,
1221 u32 handle,
1222 __be16 protocol,
1223 struct tc_to_netdev *tc);
1224 #if IS_ENABLED(CONFIG_FCOE)
1225 int (*ndo_fcoe_enable)(struct net_device *dev);
1226 int (*ndo_fcoe_disable)(struct net_device *dev);
1227 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1228 u16 xid,
1229 struct scatterlist *sgl,
1230 unsigned int sgc);
1231 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1232 u16 xid);
1233 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1234 u16 xid,
1235 struct scatterlist *sgl,
1236 unsigned int sgc);
1237 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1238 struct netdev_fcoe_hbainfo *hbainfo);
1239 #endif
1240
1241 #if IS_ENABLED(CONFIG_LIBFCOE)
1242 #define NETDEV_FCOE_WWNN 0
1243 #define NETDEV_FCOE_WWPN 1
1244 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1245 u64 *wwn, int type);
1246 #endif
1247
1248 #ifdef CONFIG_RFS_ACCEL
1249 int (*ndo_rx_flow_steer)(struct net_device *dev,
1250 const struct sk_buff *skb,
1251 u16 rxq_index,
1252 u32 flow_id);
1253 #endif
1254 int (*ndo_add_slave)(struct net_device *dev,
1255 struct net_device *slave_dev);
1256 int (*ndo_del_slave)(struct net_device *dev,
1257 struct net_device *slave_dev);
1258 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1259 netdev_features_t features);
1260 int (*ndo_set_features)(struct net_device *dev,
1261 netdev_features_t features);
1262 int (*ndo_neigh_construct)(struct net_device *dev,
1263 struct neighbour *n);
1264 void (*ndo_neigh_destroy)(struct net_device *dev,
1265 struct neighbour *n);
1266
1267 int (*ndo_fdb_add)(struct ndmsg *ndm,
1268 struct nlattr *tb[],
1269 struct net_device *dev,
1270 const unsigned char *addr,
1271 u16 vid,
1272 u16 flags);
1273 int (*ndo_fdb_del)(struct ndmsg *ndm,
1274 struct nlattr *tb[],
1275 struct net_device *dev,
1276 const unsigned char *addr,
1277 u16 vid);
1278 int (*ndo_fdb_dump)(struct sk_buff *skb,
1279 struct netlink_callback *cb,
1280 struct net_device *dev,
1281 struct net_device *filter_dev,
1282 int *idx);
1283
1284 int (*ndo_bridge_setlink)(struct net_device *dev,
1285 struct nlmsghdr *nlh,
1286 u16 flags);
1287 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1288 u32 pid, u32 seq,
1289 struct net_device *dev,
1290 u32 filter_mask,
1291 int nlflags);
1292 int (*ndo_bridge_dellink)(struct net_device *dev,
1293 struct nlmsghdr *nlh,
1294 u16 flags);
1295 int (*ndo_change_carrier)(struct net_device *dev,
1296 bool new_carrier);
1297 int (*ndo_get_phys_port_id)(struct net_device *dev,
1298 struct netdev_phys_item_id *ppid);
1299 int (*ndo_get_phys_port_name)(struct net_device *dev,
1300 char *name, size_t len);
1301 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1302 struct udp_tunnel_info *ti);
1303 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1304 struct udp_tunnel_info *ti);
1305 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1306 struct net_device *dev);
1307 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1308 void *priv);
1309
1310 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1311 struct net_device *dev,
1312 void *priv);
1313 int (*ndo_get_lock_subclass)(struct net_device *dev);
1314 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1315 int queue_index,
1316 u32 maxrate);
1317 int (*ndo_get_iflink)(const struct net_device *dev);
1318 int (*ndo_change_proto_down)(struct net_device *dev,
1319 bool proto_down);
1320 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1321 struct sk_buff *skb);
1322 void (*ndo_set_rx_headroom)(struct net_device *dev,
1323 int needed_headroom);
1324 int (*ndo_xdp)(struct net_device *dev,
1325 struct netdev_xdp *xdp);
1326 };
1327
1328 /**
1329 * enum net_device_priv_flags - &struct net_device priv_flags
1330 *
1331 * These are the &struct net_device, they are only set internally
1332 * by drivers and used in the kernel. These flags are invisible to
1333 * userspace; this means that the order of these flags can change
1334 * during any kernel release.
1335 *
1336 * You should have a pretty good reason to be extending these flags.
1337 *
1338 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1339 * @IFF_EBRIDGE: Ethernet bridging device
1340 * @IFF_BONDING: bonding master or slave
1341 * @IFF_ISATAP: ISATAP interface (RFC4214)
1342 * @IFF_WAN_HDLC: WAN HDLC device
1343 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1344 * release skb->dst
1345 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1346 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1347 * @IFF_MACVLAN_PORT: device used as macvlan port
1348 * @IFF_BRIDGE_PORT: device used as bridge port
1349 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1350 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1351 * @IFF_UNICAST_FLT: Supports unicast filtering
1352 * @IFF_TEAM_PORT: device used as team port
1353 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1354 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1355 * change when it's running
1356 * @IFF_MACVLAN: Macvlan device
1357 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1358 * underlying stacked devices
1359 * @IFF_IPVLAN_MASTER: IPvlan master device
1360 * @IFF_IPVLAN_SLAVE: IPvlan slave device
1361 * @IFF_L3MDEV_MASTER: device is an L3 master device
1362 * @IFF_NO_QUEUE: device can run without qdisc attached
1363 * @IFF_OPENVSWITCH: device is a Open vSwitch master
1364 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1365 * @IFF_TEAM: device is a team device
1366 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1367 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1368 * entity (i.e. the master device for bridged veth)
1369 * @IFF_MACSEC: device is a MACsec device
1370 */
1371 enum netdev_priv_flags {
1372 IFF_802_1Q_VLAN = 1<<0,
1373 IFF_EBRIDGE = 1<<1,
1374 IFF_BONDING = 1<<2,
1375 IFF_ISATAP = 1<<3,
1376 IFF_WAN_HDLC = 1<<4,
1377 IFF_XMIT_DST_RELEASE = 1<<5,
1378 IFF_DONT_BRIDGE = 1<<6,
1379 IFF_DISABLE_NETPOLL = 1<<7,
1380 IFF_MACVLAN_PORT = 1<<8,
1381 IFF_BRIDGE_PORT = 1<<9,
1382 IFF_OVS_DATAPATH = 1<<10,
1383 IFF_TX_SKB_SHARING = 1<<11,
1384 IFF_UNICAST_FLT = 1<<12,
1385 IFF_TEAM_PORT = 1<<13,
1386 IFF_SUPP_NOFCS = 1<<14,
1387 IFF_LIVE_ADDR_CHANGE = 1<<15,
1388 IFF_MACVLAN = 1<<16,
1389 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1390 IFF_IPVLAN_MASTER = 1<<18,
1391 IFF_IPVLAN_SLAVE = 1<<19,
1392 IFF_L3MDEV_MASTER = 1<<20,
1393 IFF_NO_QUEUE = 1<<21,
1394 IFF_OPENVSWITCH = 1<<22,
1395 IFF_L3MDEV_SLAVE = 1<<23,
1396 IFF_TEAM = 1<<24,
1397 IFF_RXFH_CONFIGURED = 1<<25,
1398 IFF_PHONY_HEADROOM = 1<<26,
1399 IFF_MACSEC = 1<<27,
1400 };
1401
1402 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1403 #define IFF_EBRIDGE IFF_EBRIDGE
1404 #define IFF_BONDING IFF_BONDING
1405 #define IFF_ISATAP IFF_ISATAP
1406 #define IFF_WAN_HDLC IFF_WAN_HDLC
1407 #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1408 #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1409 #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1410 #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1411 #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1412 #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1413 #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1414 #define IFF_UNICAST_FLT IFF_UNICAST_FLT
1415 #define IFF_TEAM_PORT IFF_TEAM_PORT
1416 #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1417 #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1418 #define IFF_MACVLAN IFF_MACVLAN
1419 #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1420 #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1421 #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1422 #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1423 #define IFF_NO_QUEUE IFF_NO_QUEUE
1424 #define IFF_OPENVSWITCH IFF_OPENVSWITCH
1425 #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1426 #define IFF_TEAM IFF_TEAM
1427 #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1428 #define IFF_MACSEC IFF_MACSEC
1429
1430 /**
1431 * struct net_device - The DEVICE structure.
1432 * Actually, this whole structure is a big mistake. It mixes I/O
1433 * data with strictly "high-level" data, and it has to know about
1434 * almost every data structure used in the INET module.
1435 *
1436 * @name: This is the first field of the "visible" part of this structure
1437 * (i.e. as seen by users in the "Space.c" file). It is the name
1438 * of the interface.
1439 *
1440 * @name_hlist: Device name hash chain, please keep it close to name[]
1441 * @ifalias: SNMP alias
1442 * @mem_end: Shared memory end
1443 * @mem_start: Shared memory start
1444 * @base_addr: Device I/O address
1445 * @irq: Device IRQ number
1446 *
1447 * @carrier_changes: Stats to monitor carrier on<->off transitions
1448 *
1449 * @state: Generic network queuing layer state, see netdev_state_t
1450 * @dev_list: The global list of network devices
1451 * @napi_list: List entry used for polling NAPI devices
1452 * @unreg_list: List entry when we are unregistering the
1453 * device; see the function unregister_netdev
1454 * @close_list: List entry used when we are closing the device
1455 * @ptype_all: Device-specific packet handlers for all protocols
1456 * @ptype_specific: Device-specific, protocol-specific packet handlers
1457 *
1458 * @adj_list: Directly linked devices, like slaves for bonding
1459 * @all_adj_list: All linked devices, *including* neighbours
1460 * @features: Currently active device features
1461 * @hw_features: User-changeable features
1462 *
1463 * @wanted_features: User-requested features
1464 * @vlan_features: Mask of features inheritable by VLAN devices
1465 *
1466 * @hw_enc_features: Mask of features inherited by encapsulating devices
1467 * This field indicates what encapsulation
1468 * offloads the hardware is capable of doing,
1469 * and drivers will need to set them appropriately.
1470 *
1471 * @mpls_features: Mask of features inheritable by MPLS
1472 *
1473 * @ifindex: interface index
1474 * @group: The group the device belongs to
1475 *
1476 * @stats: Statistics struct, which was left as a legacy, use
1477 * rtnl_link_stats64 instead
1478 *
1479 * @rx_dropped: Dropped packets by core network,
1480 * do not use this in drivers
1481 * @tx_dropped: Dropped packets by core network,
1482 * do not use this in drivers
1483 * @rx_nohandler: nohandler dropped packets by core network on
1484 * inactive devices, do not use this in drivers
1485 *
1486 * @wireless_handlers: List of functions to handle Wireless Extensions,
1487 * instead of ioctl,
1488 * see <net/iw_handler.h> for details.
1489 * @wireless_data: Instance data managed by the core of wireless extensions
1490 *
1491 * @netdev_ops: Includes several pointers to callbacks,
1492 * if one wants to override the ndo_*() functions
1493 * @ethtool_ops: Management operations
1494 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1495 * discovery handling. Necessary for e.g. 6LoWPAN.
1496 * @header_ops: Includes callbacks for creating,parsing,caching,etc
1497 * of Layer 2 headers.
1498 *
1499 * @flags: Interface flags (a la BSD)
1500 * @priv_flags: Like 'flags' but invisible to userspace,
1501 * see if.h for the definitions
1502 * @gflags: Global flags ( kept as legacy )
1503 * @padded: How much padding added by alloc_netdev()
1504 * @operstate: RFC2863 operstate
1505 * @link_mode: Mapping policy to operstate
1506 * @if_port: Selectable AUI, TP, ...
1507 * @dma: DMA channel
1508 * @mtu: Interface MTU value
1509 * @type: Interface hardware type
1510 * @hard_header_len: Maximum hardware header length.
1511 *
1512 * @needed_headroom: Extra headroom the hardware may need, but not in all
1513 * cases can this be guaranteed
1514 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1515 * cases can this be guaranteed. Some cases also use
1516 * LL_MAX_HEADER instead to allocate the skb
1517 *
1518 * interface address info:
1519 *
1520 * @perm_addr: Permanent hw address
1521 * @addr_assign_type: Hw address assignment type
1522 * @addr_len: Hardware address length
1523 * @neigh_priv_len: Used in neigh_alloc()
1524 * @dev_id: Used to differentiate devices that share
1525 * the same link layer address
1526 * @dev_port: Used to differentiate devices that share
1527 * the same function
1528 * @addr_list_lock: XXX: need comments on this one
1529 * @uc_promisc: Counter that indicates promiscuous mode
1530 * has been enabled due to the need to listen to
1531 * additional unicast addresses in a device that
1532 * does not implement ndo_set_rx_mode()
1533 * @uc: unicast mac addresses
1534 * @mc: multicast mac addresses
1535 * @dev_addrs: list of device hw addresses
1536 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1537 * @promiscuity: Number of times the NIC is told to work in
1538 * promiscuous mode; if it becomes 0 the NIC will
1539 * exit promiscuous mode
1540 * @allmulti: Counter, enables or disables allmulticast mode
1541 *
1542 * @vlan_info: VLAN info
1543 * @dsa_ptr: dsa specific data
1544 * @tipc_ptr: TIPC specific data
1545 * @atalk_ptr: AppleTalk link
1546 * @ip_ptr: IPv4 specific data
1547 * @dn_ptr: DECnet specific data
1548 * @ip6_ptr: IPv6 specific data
1549 * @ax25_ptr: AX.25 specific data
1550 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1551 *
1552 * @last_rx: Time of last Rx
1553 * @dev_addr: Hw address (before bcast,
1554 * because most packets are unicast)
1555 *
1556 * @_rx: Array of RX queues
1557 * @num_rx_queues: Number of RX queues
1558 * allocated at register_netdev() time
1559 * @real_num_rx_queues: Number of RX queues currently active in device
1560 *
1561 * @rx_handler: handler for received packets
1562 * @rx_handler_data: XXX: need comments on this one
1563 * @ingress_queue: XXX: need comments on this one
1564 * @broadcast: hw bcast address
1565 *
1566 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1567 * indexed by RX queue number. Assigned by driver.
1568 * This must only be set if the ndo_rx_flow_steer
1569 * operation is defined
1570 * @index_hlist: Device index hash chain
1571 *
1572 * @_tx: Array of TX queues
1573 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1574 * @real_num_tx_queues: Number of TX queues currently active in device
1575 * @qdisc: Root qdisc from userspace point of view
1576 * @tx_queue_len: Max frames per queue allowed
1577 * @tx_global_lock: XXX: need comments on this one
1578 *
1579 * @xps_maps: XXX: need comments on this one
1580 *
1581 * @watchdog_timeo: Represents the timeout that is used by
1582 * the watchdog (see dev_watchdog())
1583 * @watchdog_timer: List of timers
1584 *
1585 * @pcpu_refcnt: Number of references to this device
1586 * @todo_list: Delayed register/unregister
1587 * @link_watch_list: XXX: need comments on this one
1588 *
1589 * @reg_state: Register/unregister state machine
1590 * @dismantle: Device is going to be freed
1591 * @rtnl_link_state: This enum represents the phases of creating
1592 * a new link
1593 *
1594 * @destructor: Called from unregister,
1595 * can be used to call free_netdev
1596 * @npinfo: XXX: need comments on this one
1597 * @nd_net: Network namespace this network device is inside
1598 *
1599 * @ml_priv: Mid-layer private
1600 * @lstats: Loopback statistics
1601 * @tstats: Tunnel statistics
1602 * @dstats: Dummy statistics
1603 * @vstats: Virtual ethernet statistics
1604 *
1605 * @garp_port: GARP
1606 * @mrp_port: MRP
1607 *
1608 * @dev: Class/net/name entry
1609 * @sysfs_groups: Space for optional device, statistics and wireless
1610 * sysfs groups
1611 *
1612 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1613 * @rtnl_link_ops: Rtnl_link_ops
1614 *
1615 * @gso_max_size: Maximum size of generic segmentation offload
1616 * @gso_max_segs: Maximum number of segments that can be passed to the
1617 * NIC for GSO
1618 *
1619 * @dcbnl_ops: Data Center Bridging netlink ops
1620 * @num_tc: Number of traffic classes in the net device
1621 * @tc_to_txq: XXX: need comments on this one
1622 * @prio_tc_map XXX: need comments on this one
1623 *
1624 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1625 *
1626 * @priomap: XXX: need comments on this one
1627 * @phydev: Physical device may attach itself
1628 * for hardware timestamping
1629 *
1630 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1631 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1632 *
1633 * @proto_down: protocol port state information can be sent to the
1634 * switch driver and used to set the phys state of the
1635 * switch port.
1636 *
1637 * FIXME: cleanup struct net_device such that network protocol info
1638 * moves out.
1639 */
1640
1641 struct net_device {
1642 char name[IFNAMSIZ];
1643 struct hlist_node name_hlist;
1644 char *ifalias;
1645 /*
1646 * I/O specific fields
1647 * FIXME: Merge these and struct ifmap into one
1648 */
1649 unsigned long mem_end;
1650 unsigned long mem_start;
1651 unsigned long base_addr;
1652 int irq;
1653
1654 atomic_t carrier_changes;
1655
1656 /*
1657 * Some hardware also needs these fields (state,dev_list,
1658 * napi_list,unreg_list,close_list) but they are not
1659 * part of the usual set specified in Space.c.
1660 */
1661
1662 unsigned long state;
1663
1664 struct list_head dev_list;
1665 struct list_head napi_list;
1666 struct list_head unreg_list;
1667 struct list_head close_list;
1668 struct list_head ptype_all;
1669 struct list_head ptype_specific;
1670
1671 struct {
1672 struct list_head upper;
1673 struct list_head lower;
1674 } adj_list;
1675
1676 struct {
1677 struct list_head upper;
1678 struct list_head lower;
1679 } all_adj_list;
1680
1681 netdev_features_t features;
1682 netdev_features_t hw_features;
1683 netdev_features_t wanted_features;
1684 netdev_features_t vlan_features;
1685 netdev_features_t hw_enc_features;
1686 netdev_features_t mpls_features;
1687 netdev_features_t gso_partial_features;
1688
1689 int ifindex;
1690 int group;
1691
1692 struct net_device_stats stats;
1693
1694 atomic_long_t rx_dropped;
1695 atomic_long_t tx_dropped;
1696 atomic_long_t rx_nohandler;
1697
1698 #ifdef CONFIG_WIRELESS_EXT
1699 const struct iw_handler_def *wireless_handlers;
1700 struct iw_public_data *wireless_data;
1701 #endif
1702 const struct net_device_ops *netdev_ops;
1703 const struct ethtool_ops *ethtool_ops;
1704 #ifdef CONFIG_NET_SWITCHDEV
1705 const struct switchdev_ops *switchdev_ops;
1706 #endif
1707 #ifdef CONFIG_NET_L3_MASTER_DEV
1708 const struct l3mdev_ops *l3mdev_ops;
1709 #endif
1710 #if IS_ENABLED(CONFIG_IPV6)
1711 const struct ndisc_ops *ndisc_ops;
1712 #endif
1713
1714 const struct header_ops *header_ops;
1715
1716 unsigned int flags;
1717 unsigned int priv_flags;
1718
1719 unsigned short gflags;
1720 unsigned short padded;
1721
1722 unsigned char operstate;
1723 unsigned char link_mode;
1724
1725 unsigned char if_port;
1726 unsigned char dma;
1727
1728 unsigned int mtu;
1729 unsigned short type;
1730 unsigned short hard_header_len;
1731
1732 unsigned short needed_headroom;
1733 unsigned short needed_tailroom;
1734
1735 /* Interface address info. */
1736 unsigned char perm_addr[MAX_ADDR_LEN];
1737 unsigned char addr_assign_type;
1738 unsigned char addr_len;
1739 unsigned short neigh_priv_len;
1740 unsigned short dev_id;
1741 unsigned short dev_port;
1742 spinlock_t addr_list_lock;
1743 unsigned char name_assign_type;
1744 bool uc_promisc;
1745 struct netdev_hw_addr_list uc;
1746 struct netdev_hw_addr_list mc;
1747 struct netdev_hw_addr_list dev_addrs;
1748
1749 #ifdef CONFIG_SYSFS
1750 struct kset *queues_kset;
1751 #endif
1752 unsigned int promiscuity;
1753 unsigned int allmulti;
1754
1755
1756 /* Protocol-specific pointers */
1757
1758 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1759 struct vlan_info __rcu *vlan_info;
1760 #endif
1761 #if IS_ENABLED(CONFIG_NET_DSA)
1762 struct dsa_switch_tree *dsa_ptr;
1763 #endif
1764 #if IS_ENABLED(CONFIG_TIPC)
1765 struct tipc_bearer __rcu *tipc_ptr;
1766 #endif
1767 void *atalk_ptr;
1768 struct in_device __rcu *ip_ptr;
1769 struct dn_dev __rcu *dn_ptr;
1770 struct inet6_dev __rcu *ip6_ptr;
1771 void *ax25_ptr;
1772 struct wireless_dev *ieee80211_ptr;
1773 struct wpan_dev *ieee802154_ptr;
1774 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
1775 struct mpls_dev __rcu *mpls_ptr;
1776 #endif
1777
1778 /*
1779 * Cache lines mostly used on receive path (including eth_type_trans())
1780 */
1781 unsigned long last_rx;
1782
1783 /* Interface address info used in eth_type_trans() */
1784 unsigned char *dev_addr;
1785
1786 #ifdef CONFIG_SYSFS
1787 struct netdev_rx_queue *_rx;
1788
1789 unsigned int num_rx_queues;
1790 unsigned int real_num_rx_queues;
1791 #endif
1792
1793 unsigned long gro_flush_timeout;
1794 rx_handler_func_t __rcu *rx_handler;
1795 void __rcu *rx_handler_data;
1796
1797 #ifdef CONFIG_NET_CLS_ACT
1798 struct tcf_proto __rcu *ingress_cl_list;
1799 #endif
1800 struct netdev_queue __rcu *ingress_queue;
1801 #ifdef CONFIG_NETFILTER_INGRESS
1802 struct nf_hook_entry __rcu *nf_hooks_ingress;
1803 #endif
1804
1805 unsigned char broadcast[MAX_ADDR_LEN];
1806 #ifdef CONFIG_RFS_ACCEL
1807 struct cpu_rmap *rx_cpu_rmap;
1808 #endif
1809 struct hlist_node index_hlist;
1810
1811 /*
1812 * Cache lines mostly used on transmit path
1813 */
1814 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1815 unsigned int num_tx_queues;
1816 unsigned int real_num_tx_queues;
1817 struct Qdisc *qdisc;
1818 #ifdef CONFIG_NET_SCHED
1819 DECLARE_HASHTABLE (qdisc_hash, 4);
1820 #endif
1821 unsigned long tx_queue_len;
1822 spinlock_t tx_global_lock;
1823 int watchdog_timeo;
1824
1825 #ifdef CONFIG_XPS
1826 struct xps_dev_maps __rcu *xps_maps;
1827 #endif
1828 #ifdef CONFIG_NET_CLS_ACT
1829 struct tcf_proto __rcu *egress_cl_list;
1830 #endif
1831
1832 /* These may be needed for future network-power-down code. */
1833 struct timer_list watchdog_timer;
1834
1835 int __percpu *pcpu_refcnt;
1836 struct list_head todo_list;
1837
1838 struct list_head link_watch_list;
1839
1840 enum { NETREG_UNINITIALIZED=0,
1841 NETREG_REGISTERED, /* completed register_netdevice */
1842 NETREG_UNREGISTERING, /* called unregister_netdevice */
1843 NETREG_UNREGISTERED, /* completed unregister todo */
1844 NETREG_RELEASED, /* called free_netdev */
1845 NETREG_DUMMY, /* dummy device for NAPI poll */
1846 } reg_state:8;
1847
1848 bool dismantle;
1849
1850 enum {
1851 RTNL_LINK_INITIALIZED,
1852 RTNL_LINK_INITIALIZING,
1853 } rtnl_link_state:16;
1854
1855 void (*destructor)(struct net_device *dev);
1856
1857 #ifdef CONFIG_NETPOLL
1858 struct netpoll_info __rcu *npinfo;
1859 #endif
1860
1861 possible_net_t nd_net;
1862
1863 /* mid-layer private */
1864 union {
1865 void *ml_priv;
1866 struct pcpu_lstats __percpu *lstats;
1867 struct pcpu_sw_netstats __percpu *tstats;
1868 struct pcpu_dstats __percpu *dstats;
1869 struct pcpu_vstats __percpu *vstats;
1870 };
1871
1872 struct garp_port __rcu *garp_port;
1873 struct mrp_port __rcu *mrp_port;
1874
1875 struct device dev;
1876 const struct attribute_group *sysfs_groups[4];
1877 const struct attribute_group *sysfs_rx_queue_group;
1878
1879 const struct rtnl_link_ops *rtnl_link_ops;
1880
1881 /* for setting kernel sock attribute on TCP connection setup */
1882 #define GSO_MAX_SIZE 65536
1883 unsigned int gso_max_size;
1884 #define GSO_MAX_SEGS 65535
1885 u16 gso_max_segs;
1886
1887 #ifdef CONFIG_DCB
1888 const struct dcbnl_rtnl_ops *dcbnl_ops;
1889 #endif
1890 u8 num_tc;
1891 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1892 u8 prio_tc_map[TC_BITMASK + 1];
1893
1894 #if IS_ENABLED(CONFIG_FCOE)
1895 unsigned int fcoe_ddp_xid;
1896 #endif
1897 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1898 struct netprio_map __rcu *priomap;
1899 #endif
1900 struct phy_device *phydev;
1901 struct lock_class_key *qdisc_tx_busylock;
1902 struct lock_class_key *qdisc_running_key;
1903 bool proto_down;
1904 };
1905 #define to_net_dev(d) container_of(d, struct net_device, dev)
1906
1907 #define NETDEV_ALIGN 32
1908
1909 static inline
1910 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1911 {
1912 return dev->prio_tc_map[prio & TC_BITMASK];
1913 }
1914
1915 static inline
1916 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1917 {
1918 if (tc >= dev->num_tc)
1919 return -EINVAL;
1920
1921 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1922 return 0;
1923 }
1924
1925 static inline
1926 void netdev_reset_tc(struct net_device *dev)
1927 {
1928 dev->num_tc = 0;
1929 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1930 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1931 }
1932
1933 static inline
1934 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1935 {
1936 if (tc >= dev->num_tc)
1937 return -EINVAL;
1938
1939 dev->tc_to_txq[tc].count = count;
1940 dev->tc_to_txq[tc].offset = offset;
1941 return 0;
1942 }
1943
1944 static inline
1945 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1946 {
1947 if (num_tc > TC_MAX_QUEUE)
1948 return -EINVAL;
1949
1950 dev->num_tc = num_tc;
1951 return 0;
1952 }
1953
1954 static inline
1955 int netdev_get_num_tc(struct net_device *dev)
1956 {
1957 return dev->num_tc;
1958 }
1959
1960 static inline
1961 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1962 unsigned int index)
1963 {
1964 return &dev->_tx[index];
1965 }
1966
1967 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
1968 const struct sk_buff *skb)
1969 {
1970 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
1971 }
1972
1973 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1974 void (*f)(struct net_device *,
1975 struct netdev_queue *,
1976 void *),
1977 void *arg)
1978 {
1979 unsigned int i;
1980
1981 for (i = 0; i < dev->num_tx_queues; i++)
1982 f(dev, &dev->_tx[i], arg);
1983 }
1984
1985 #define netdev_lockdep_set_classes(dev) \
1986 { \
1987 static struct lock_class_key qdisc_tx_busylock_key; \
1988 static struct lock_class_key qdisc_running_key; \
1989 static struct lock_class_key qdisc_xmit_lock_key; \
1990 static struct lock_class_key dev_addr_list_lock_key; \
1991 unsigned int i; \
1992 \
1993 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
1994 (dev)->qdisc_running_key = &qdisc_running_key; \
1995 lockdep_set_class(&(dev)->addr_list_lock, \
1996 &dev_addr_list_lock_key); \
1997 for (i = 0; i < (dev)->num_tx_queues; i++) \
1998 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
1999 &qdisc_xmit_lock_key); \
2000 }
2001
2002 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2003 struct sk_buff *skb,
2004 void *accel_priv);
2005
2006 /* returns the headroom that the master device needs to take in account
2007 * when forwarding to this dev
2008 */
2009 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2010 {
2011 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2012 }
2013
2014 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2015 {
2016 if (dev->netdev_ops->ndo_set_rx_headroom)
2017 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2018 }
2019
2020 /* set the device rx headroom to the dev's default */
2021 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2022 {
2023 netdev_set_rx_headroom(dev, -1);
2024 }
2025
2026 /*
2027 * Net namespace inlines
2028 */
2029 static inline
2030 struct net *dev_net(const struct net_device *dev)
2031 {
2032 return read_pnet(&dev->nd_net);
2033 }
2034
2035 static inline
2036 void dev_net_set(struct net_device *dev, struct net *net)
2037 {
2038 write_pnet(&dev->nd_net, net);
2039 }
2040
2041 static inline bool netdev_uses_dsa(struct net_device *dev)
2042 {
2043 #if IS_ENABLED(CONFIG_NET_DSA)
2044 if (dev->dsa_ptr != NULL)
2045 return dsa_uses_tagged_protocol(dev->dsa_ptr);
2046 #endif
2047 return false;
2048 }
2049
2050 /**
2051 * netdev_priv - access network device private data
2052 * @dev: network device
2053 *
2054 * Get network device private data
2055 */
2056 static inline void *netdev_priv(const struct net_device *dev)
2057 {
2058 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2059 }
2060
2061 /* Set the sysfs physical device reference for the network logical device
2062 * if set prior to registration will cause a symlink during initialization.
2063 */
2064 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2065
2066 /* Set the sysfs device type for the network logical device to allow
2067 * fine-grained identification of different network device types. For
2068 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2069 */
2070 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2071
2072 /* Default NAPI poll() weight
2073 * Device drivers are strongly advised to not use bigger value
2074 */
2075 #define NAPI_POLL_WEIGHT 64
2076
2077 /**
2078 * netif_napi_add - initialize a NAPI context
2079 * @dev: network device
2080 * @napi: NAPI context
2081 * @poll: polling function
2082 * @weight: default weight
2083 *
2084 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2085 * *any* of the other NAPI-related functions.
2086 */
2087 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2088 int (*poll)(struct napi_struct *, int), int weight);
2089
2090 /**
2091 * netif_tx_napi_add - initialize a NAPI context
2092 * @dev: network device
2093 * @napi: NAPI context
2094 * @poll: polling function
2095 * @weight: default weight
2096 *
2097 * This variant of netif_napi_add() should be used from drivers using NAPI
2098 * to exclusively poll a TX queue.
2099 * This will avoid we add it into napi_hash[], thus polluting this hash table.
2100 */
2101 static inline void netif_tx_napi_add(struct net_device *dev,
2102 struct napi_struct *napi,
2103 int (*poll)(struct napi_struct *, int),
2104 int weight)
2105 {
2106 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2107 netif_napi_add(dev, napi, poll, weight);
2108 }
2109
2110 /**
2111 * netif_napi_del - remove a NAPI context
2112 * @napi: NAPI context
2113 *
2114 * netif_napi_del() removes a NAPI context from the network device NAPI list
2115 */
2116 void netif_napi_del(struct napi_struct *napi);
2117
2118 struct napi_gro_cb {
2119 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2120 void *frag0;
2121
2122 /* Length of frag0. */
2123 unsigned int frag0_len;
2124
2125 /* This indicates where we are processing relative to skb->data. */
2126 int data_offset;
2127
2128 /* This is non-zero if the packet cannot be merged with the new skb. */
2129 u16 flush;
2130
2131 /* Save the IP ID here and check when we get to the transport layer */
2132 u16 flush_id;
2133
2134 /* Number of segments aggregated. */
2135 u16 count;
2136
2137 /* Start offset for remote checksum offload */
2138 u16 gro_remcsum_start;
2139
2140 /* jiffies when first packet was created/queued */
2141 unsigned long age;
2142
2143 /* Used in ipv6_gro_receive() and foo-over-udp */
2144 u16 proto;
2145
2146 /* This is non-zero if the packet may be of the same flow. */
2147 u8 same_flow:1;
2148
2149 /* Used in tunnel GRO receive */
2150 u8 encap_mark:1;
2151
2152 /* GRO checksum is valid */
2153 u8 csum_valid:1;
2154
2155 /* Number of checksums via CHECKSUM_UNNECESSARY */
2156 u8 csum_cnt:3;
2157
2158 /* Free the skb? */
2159 u8 free:2;
2160 #define NAPI_GRO_FREE 1
2161 #define NAPI_GRO_FREE_STOLEN_HEAD 2
2162
2163 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2164 u8 is_ipv6:1;
2165
2166 /* Used in GRE, set in fou/gue_gro_receive */
2167 u8 is_fou:1;
2168
2169 /* Used to determine if flush_id can be ignored */
2170 u8 is_atomic:1;
2171
2172 /* 5 bit hole */
2173
2174 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2175 __wsum csum;
2176
2177 /* used in skb_gro_receive() slow path */
2178 struct sk_buff *last;
2179 };
2180
2181 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2182
2183 struct packet_type {
2184 __be16 type; /* This is really htons(ether_type). */
2185 struct net_device *dev; /* NULL is wildcarded here */
2186 int (*func) (struct sk_buff *,
2187 struct net_device *,
2188 struct packet_type *,
2189 struct net_device *);
2190 bool (*id_match)(struct packet_type *ptype,
2191 struct sock *sk);
2192 void *af_packet_priv;
2193 struct list_head list;
2194 };
2195
2196 struct offload_callbacks {
2197 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2198 netdev_features_t features);
2199 struct sk_buff **(*gro_receive)(struct sk_buff **head,
2200 struct sk_buff *skb);
2201 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2202 };
2203
2204 struct packet_offload {
2205 __be16 type; /* This is really htons(ether_type). */
2206 u16 priority;
2207 struct offload_callbacks callbacks;
2208 struct list_head list;
2209 };
2210
2211 /* often modified stats are per-CPU, other are shared (netdev->stats) */
2212 struct pcpu_sw_netstats {
2213 u64 rx_packets;
2214 u64 rx_bytes;
2215 u64 tx_packets;
2216 u64 tx_bytes;
2217 struct u64_stats_sync syncp;
2218 };
2219
2220 #define __netdev_alloc_pcpu_stats(type, gfp) \
2221 ({ \
2222 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2223 if (pcpu_stats) { \
2224 int __cpu; \
2225 for_each_possible_cpu(__cpu) { \
2226 typeof(type) *stat; \
2227 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2228 u64_stats_init(&stat->syncp); \
2229 } \
2230 } \
2231 pcpu_stats; \
2232 })
2233
2234 #define netdev_alloc_pcpu_stats(type) \
2235 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2236
2237 enum netdev_lag_tx_type {
2238 NETDEV_LAG_TX_TYPE_UNKNOWN,
2239 NETDEV_LAG_TX_TYPE_RANDOM,
2240 NETDEV_LAG_TX_TYPE_BROADCAST,
2241 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2242 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2243 NETDEV_LAG_TX_TYPE_HASH,
2244 };
2245
2246 struct netdev_lag_upper_info {
2247 enum netdev_lag_tx_type tx_type;
2248 };
2249
2250 struct netdev_lag_lower_state_info {
2251 u8 link_up : 1,
2252 tx_enabled : 1;
2253 };
2254
2255 #include <linux/notifier.h>
2256
2257 /* netdevice notifier chain. Please remember to update the rtnetlink
2258 * notification exclusion list in rtnetlink_event() when adding new
2259 * types.
2260 */
2261 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
2262 #define NETDEV_DOWN 0x0002
2263 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
2264 detected a hardware crash and restarted
2265 - we can use this eg to kick tcp sessions
2266 once done */
2267 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
2268 #define NETDEV_REGISTER 0x0005
2269 #define NETDEV_UNREGISTER 0x0006
2270 #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
2271 #define NETDEV_CHANGEADDR 0x0008
2272 #define NETDEV_GOING_DOWN 0x0009
2273 #define NETDEV_CHANGENAME 0x000A
2274 #define NETDEV_FEAT_CHANGE 0x000B
2275 #define NETDEV_BONDING_FAILOVER 0x000C
2276 #define NETDEV_PRE_UP 0x000D
2277 #define NETDEV_PRE_TYPE_CHANGE 0x000E
2278 #define NETDEV_POST_TYPE_CHANGE 0x000F
2279 #define NETDEV_POST_INIT 0x0010
2280 #define NETDEV_UNREGISTER_FINAL 0x0011
2281 #define NETDEV_RELEASE 0x0012
2282 #define NETDEV_NOTIFY_PEERS 0x0013
2283 #define NETDEV_JOIN 0x0014
2284 #define NETDEV_CHANGEUPPER 0x0015
2285 #define NETDEV_RESEND_IGMP 0x0016
2286 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
2287 #define NETDEV_CHANGEINFODATA 0x0018
2288 #define NETDEV_BONDING_INFO 0x0019
2289 #define NETDEV_PRECHANGEUPPER 0x001A
2290 #define NETDEV_CHANGELOWERSTATE 0x001B
2291 #define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
2292 #define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
2293
2294 int register_netdevice_notifier(struct notifier_block *nb);
2295 int unregister_netdevice_notifier(struct notifier_block *nb);
2296
2297 struct netdev_notifier_info {
2298 struct net_device *dev;
2299 };
2300
2301 struct netdev_notifier_change_info {
2302 struct netdev_notifier_info info; /* must be first */
2303 unsigned int flags_changed;
2304 };
2305
2306 struct netdev_notifier_changeupper_info {
2307 struct netdev_notifier_info info; /* must be first */
2308 struct net_device *upper_dev; /* new upper dev */
2309 bool master; /* is upper dev master */
2310 bool linking; /* is the notification for link or unlink */
2311 void *upper_info; /* upper dev info */
2312 };
2313
2314 struct netdev_notifier_changelowerstate_info {
2315 struct netdev_notifier_info info; /* must be first */
2316 void *lower_state_info; /* is lower dev state */
2317 };
2318
2319 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2320 struct net_device *dev)
2321 {
2322 info->dev = dev;
2323 }
2324
2325 static inline struct net_device *
2326 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2327 {
2328 return info->dev;
2329 }
2330
2331 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
2332
2333
2334 extern rwlock_t dev_base_lock; /* Device list lock */
2335
2336 #define for_each_netdev(net, d) \
2337 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2338 #define for_each_netdev_reverse(net, d) \
2339 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2340 #define for_each_netdev_rcu(net, d) \
2341 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2342 #define for_each_netdev_safe(net, d, n) \
2343 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2344 #define for_each_netdev_continue(net, d) \
2345 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2346 #define for_each_netdev_continue_rcu(net, d) \
2347 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2348 #define for_each_netdev_in_bond_rcu(bond, slave) \
2349 for_each_netdev_rcu(&init_net, slave) \
2350 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2351 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2352
2353 static inline struct net_device *next_net_device(struct net_device *dev)
2354 {
2355 struct list_head *lh;
2356 struct net *net;
2357
2358 net = dev_net(dev);
2359 lh = dev->dev_list.next;
2360 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2361 }
2362
2363 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2364 {
2365 struct list_head *lh;
2366 struct net *net;
2367
2368 net = dev_net(dev);
2369 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2370 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2371 }
2372
2373 static inline struct net_device *first_net_device(struct net *net)
2374 {
2375 return list_empty(&net->dev_base_head) ? NULL :
2376 net_device_entry(net->dev_base_head.next);
2377 }
2378
2379 static inline struct net_device *first_net_device_rcu(struct net *net)
2380 {
2381 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2382
2383 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2384 }
2385
2386 int netdev_boot_setup_check(struct net_device *dev);
2387 unsigned long netdev_boot_base(const char *prefix, int unit);
2388 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2389 const char *hwaddr);
2390 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2391 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2392 void dev_add_pack(struct packet_type *pt);
2393 void dev_remove_pack(struct packet_type *pt);
2394 void __dev_remove_pack(struct packet_type *pt);
2395 void dev_add_offload(struct packet_offload *po);
2396 void dev_remove_offload(struct packet_offload *po);
2397
2398 int dev_get_iflink(const struct net_device *dev);
2399 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2400 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2401 unsigned short mask);
2402 struct net_device *dev_get_by_name(struct net *net, const char *name);
2403 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2404 struct net_device *__dev_get_by_name(struct net *net, const char *name);
2405 int dev_alloc_name(struct net_device *dev, const char *name);
2406 int dev_open(struct net_device *dev);
2407 int dev_close(struct net_device *dev);
2408 int dev_close_many(struct list_head *head, bool unlink);
2409 void dev_disable_lro(struct net_device *dev);
2410 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2411 int dev_queue_xmit(struct sk_buff *skb);
2412 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
2413 int register_netdevice(struct net_device *dev);
2414 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2415 void unregister_netdevice_many(struct list_head *head);
2416 static inline void unregister_netdevice(struct net_device *dev)
2417 {
2418 unregister_netdevice_queue(dev, NULL);
2419 }
2420
2421 int netdev_refcnt_read(const struct net_device *dev);
2422 void free_netdev(struct net_device *dev);
2423 void netdev_freemem(struct net_device *dev);
2424 void synchronize_net(void);
2425 int init_dummy_netdev(struct net_device *dev);
2426
2427 DECLARE_PER_CPU(int, xmit_recursion);
2428 #define XMIT_RECURSION_LIMIT 10
2429
2430 static inline int dev_recursion_level(void)
2431 {
2432 return this_cpu_read(xmit_recursion);
2433 }
2434
2435 struct net_device *dev_get_by_index(struct net *net, int ifindex);
2436 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2437 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2438 int netdev_get_name(struct net *net, char *name, int ifindex);
2439 int dev_restart(struct net_device *dev);
2440 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
2441
2442 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2443 {
2444 return NAPI_GRO_CB(skb)->data_offset;
2445 }
2446
2447 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2448 {
2449 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2450 }
2451
2452 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2453 {
2454 NAPI_GRO_CB(skb)->data_offset += len;
2455 }
2456
2457 static inline void *skb_gro_header_fast(struct sk_buff *skb,
2458 unsigned int offset)
2459 {
2460 return NAPI_GRO_CB(skb)->frag0 + offset;
2461 }
2462
2463 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2464 {
2465 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2466 }
2467
2468 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2469 unsigned int offset)
2470 {
2471 if (!pskb_may_pull(skb, hlen))
2472 return NULL;
2473
2474 NAPI_GRO_CB(skb)->frag0 = NULL;
2475 NAPI_GRO_CB(skb)->frag0_len = 0;
2476 return skb->data + offset;
2477 }
2478
2479 static inline void *skb_gro_network_header(struct sk_buff *skb)
2480 {
2481 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2482 skb_network_offset(skb);
2483 }
2484
2485 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2486 const void *start, unsigned int len)
2487 {
2488 if (NAPI_GRO_CB(skb)->csum_valid)
2489 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2490 csum_partial(start, len, 0));
2491 }
2492
2493 /* GRO checksum functions. These are logical equivalents of the normal
2494 * checksum functions (in skbuff.h) except that they operate on the GRO
2495 * offsets and fields in sk_buff.
2496 */
2497
2498 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2499
2500 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2501 {
2502 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2503 }
2504
2505 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2506 bool zero_okay,
2507 __sum16 check)
2508 {
2509 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2510 skb_checksum_start_offset(skb) <
2511 skb_gro_offset(skb)) &&
2512 !skb_at_gro_remcsum_start(skb) &&
2513 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2514 (!zero_okay || check));
2515 }
2516
2517 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2518 __wsum psum)
2519 {
2520 if (NAPI_GRO_CB(skb)->csum_valid &&
2521 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2522 return 0;
2523
2524 NAPI_GRO_CB(skb)->csum = psum;
2525
2526 return __skb_gro_checksum_complete(skb);
2527 }
2528
2529 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2530 {
2531 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2532 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2533 NAPI_GRO_CB(skb)->csum_cnt--;
2534 } else {
2535 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2536 * verified a new top level checksum or an encapsulated one
2537 * during GRO. This saves work if we fallback to normal path.
2538 */
2539 __skb_incr_checksum_unnecessary(skb);
2540 }
2541 }
2542
2543 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2544 compute_pseudo) \
2545 ({ \
2546 __sum16 __ret = 0; \
2547 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2548 __ret = __skb_gro_checksum_validate_complete(skb, \
2549 compute_pseudo(skb, proto)); \
2550 if (__ret) \
2551 __skb_mark_checksum_bad(skb); \
2552 else \
2553 skb_gro_incr_csum_unnecessary(skb); \
2554 __ret; \
2555 })
2556
2557 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2558 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2559
2560 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2561 compute_pseudo) \
2562 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2563
2564 #define skb_gro_checksum_simple_validate(skb) \
2565 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2566
2567 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2568 {
2569 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2570 !NAPI_GRO_CB(skb)->csum_valid);
2571 }
2572
2573 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2574 __sum16 check, __wsum pseudo)
2575 {
2576 NAPI_GRO_CB(skb)->csum = ~pseudo;
2577 NAPI_GRO_CB(skb)->csum_valid = 1;
2578 }
2579
2580 #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2581 do { \
2582 if (__skb_gro_checksum_convert_check(skb)) \
2583 __skb_gro_checksum_convert(skb, check, \
2584 compute_pseudo(skb, proto)); \
2585 } while (0)
2586
2587 struct gro_remcsum {
2588 int offset;
2589 __wsum delta;
2590 };
2591
2592 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2593 {
2594 grc->offset = 0;
2595 grc->delta = 0;
2596 }
2597
2598 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2599 unsigned int off, size_t hdrlen,
2600 int start, int offset,
2601 struct gro_remcsum *grc,
2602 bool nopartial)
2603 {
2604 __wsum delta;
2605 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2606
2607 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2608
2609 if (!nopartial) {
2610 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2611 return ptr;
2612 }
2613
2614 ptr = skb_gro_header_fast(skb, off);
2615 if (skb_gro_header_hard(skb, off + plen)) {
2616 ptr = skb_gro_header_slow(skb, off + plen, off);
2617 if (!ptr)
2618 return NULL;
2619 }
2620
2621 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2622 start, offset);
2623
2624 /* Adjust skb->csum since we changed the packet */
2625 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2626
2627 grc->offset = off + hdrlen + offset;
2628 grc->delta = delta;
2629
2630 return ptr;
2631 }
2632
2633 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2634 struct gro_remcsum *grc)
2635 {
2636 void *ptr;
2637 size_t plen = grc->offset + sizeof(u16);
2638
2639 if (!grc->delta)
2640 return;
2641
2642 ptr = skb_gro_header_fast(skb, grc->offset);
2643 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2644 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2645 if (!ptr)
2646 return;
2647 }
2648
2649 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2650 }
2651
2652 struct skb_csum_offl_spec {
2653 __u16 ipv4_okay:1,
2654 ipv6_okay:1,
2655 encap_okay:1,
2656 ip_options_okay:1,
2657 ext_hdrs_okay:1,
2658 tcp_okay:1,
2659 udp_okay:1,
2660 sctp_okay:1,
2661 vlan_okay:1,
2662 no_encapped_ipv6:1,
2663 no_not_encapped:1;
2664 };
2665
2666 bool __skb_csum_offload_chk(struct sk_buff *skb,
2667 const struct skb_csum_offl_spec *spec,
2668 bool *csum_encapped,
2669 bool csum_help);
2670
2671 static inline bool skb_csum_offload_chk(struct sk_buff *skb,
2672 const struct skb_csum_offl_spec *spec,
2673 bool *csum_encapped,
2674 bool csum_help)
2675 {
2676 if (skb->ip_summed != CHECKSUM_PARTIAL)
2677 return false;
2678
2679 return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
2680 }
2681
2682 static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
2683 const struct skb_csum_offl_spec *spec)
2684 {
2685 bool csum_encapped;
2686
2687 return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
2688 }
2689
2690 static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
2691 {
2692 static const struct skb_csum_offl_spec csum_offl_spec = {
2693 .ipv4_okay = 1,
2694 .ip_options_okay = 1,
2695 .ipv6_okay = 1,
2696 .vlan_okay = 1,
2697 .tcp_okay = 1,
2698 .udp_okay = 1,
2699 };
2700
2701 return skb_csum_offload_chk_help(skb, &csum_offl_spec);
2702 }
2703
2704 static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
2705 {
2706 static const struct skb_csum_offl_spec csum_offl_spec = {
2707 .ipv4_okay = 1,
2708 .ip_options_okay = 1,
2709 .tcp_okay = 1,
2710 .udp_okay = 1,
2711 .vlan_okay = 1,
2712 };
2713
2714 return skb_csum_offload_chk_help(skb, &csum_offl_spec);
2715 }
2716
2717 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2718 unsigned short type,
2719 const void *daddr, const void *saddr,
2720 unsigned int len)
2721 {
2722 if (!dev->header_ops || !dev->header_ops->create)
2723 return 0;
2724
2725 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2726 }
2727
2728 static inline int dev_parse_header(const struct sk_buff *skb,
2729 unsigned char *haddr)
2730 {
2731 const struct net_device *dev = skb->dev;
2732
2733 if (!dev->header_ops || !dev->header_ops->parse)
2734 return 0;
2735 return dev->header_ops->parse(skb, haddr);
2736 }
2737
2738 /* ll_header must have at least hard_header_len allocated */
2739 static inline bool dev_validate_header(const struct net_device *dev,
2740 char *ll_header, int len)
2741 {
2742 if (likely(len >= dev->hard_header_len))
2743 return true;
2744
2745 if (capable(CAP_SYS_RAWIO)) {
2746 memset(ll_header + len, 0, dev->hard_header_len - len);
2747 return true;
2748 }
2749
2750 if (dev->header_ops && dev->header_ops->validate)
2751 return dev->header_ops->validate(ll_header, len);
2752
2753 return false;
2754 }
2755
2756 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2757 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2758 static inline int unregister_gifconf(unsigned int family)
2759 {
2760 return register_gifconf(family, NULL);
2761 }
2762
2763 #ifdef CONFIG_NET_FLOW_LIMIT
2764 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
2765 struct sd_flow_limit {
2766 u64 count;
2767 unsigned int num_buckets;
2768 unsigned int history_head;
2769 u16 history[FLOW_LIMIT_HISTORY];
2770 u8 buckets[];
2771 };
2772
2773 extern int netdev_flow_limit_table_len;
2774 #endif /* CONFIG_NET_FLOW_LIMIT */
2775
2776 /*
2777 * Incoming packets are placed on per-CPU queues
2778 */
2779 struct softnet_data {
2780 struct list_head poll_list;
2781 struct sk_buff_head process_queue;
2782
2783 /* stats */
2784 unsigned int processed;
2785 unsigned int time_squeeze;
2786 unsigned int received_rps;
2787 #ifdef CONFIG_RPS
2788 struct softnet_data *rps_ipi_list;
2789 #endif
2790 #ifdef CONFIG_NET_FLOW_LIMIT
2791 struct sd_flow_limit __rcu *flow_limit;
2792 #endif
2793 struct Qdisc *output_queue;
2794 struct Qdisc **output_queue_tailp;
2795 struct sk_buff *completion_queue;
2796
2797 #ifdef CONFIG_RPS
2798 /* input_queue_head should be written by cpu owning this struct,
2799 * and only read by other cpus. Worth using a cache line.
2800 */
2801 unsigned int input_queue_head ____cacheline_aligned_in_smp;
2802
2803 /* Elements below can be accessed between CPUs for RPS/RFS */
2804 struct call_single_data csd ____cacheline_aligned_in_smp;
2805 struct softnet_data *rps_ipi_next;
2806 unsigned int cpu;
2807 unsigned int input_queue_tail;
2808 #endif
2809 unsigned int dropped;
2810 struct sk_buff_head input_pkt_queue;
2811 struct napi_struct backlog;
2812
2813 };
2814
2815 static inline void input_queue_head_incr(struct softnet_data *sd)
2816 {
2817 #ifdef CONFIG_RPS
2818 sd->input_queue_head++;
2819 #endif
2820 }
2821
2822 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2823 unsigned int *qtail)
2824 {
2825 #ifdef CONFIG_RPS
2826 *qtail = ++sd->input_queue_tail;
2827 #endif
2828 }
2829
2830 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2831
2832 void __netif_schedule(struct Qdisc *q);
2833 void netif_schedule_queue(struct netdev_queue *txq);
2834
2835 static inline void netif_tx_schedule_all(struct net_device *dev)
2836 {
2837 unsigned int i;
2838
2839 for (i = 0; i < dev->num_tx_queues; i++)
2840 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2841 }
2842
2843 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2844 {
2845 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2846 }
2847
2848 /**
2849 * netif_start_queue - allow transmit
2850 * @dev: network device
2851 *
2852 * Allow upper layers to call the device hard_start_xmit routine.
2853 */
2854 static inline void netif_start_queue(struct net_device *dev)
2855 {
2856 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
2857 }
2858
2859 static inline void netif_tx_start_all_queues(struct net_device *dev)
2860 {
2861 unsigned int i;
2862
2863 for (i = 0; i < dev->num_tx_queues; i++) {
2864 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2865 netif_tx_start_queue(txq);
2866 }
2867 }
2868
2869 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2870
2871 /**
2872 * netif_wake_queue - restart transmit
2873 * @dev: network device
2874 *
2875 * Allow upper layers to call the device hard_start_xmit routine.
2876 * Used for flow control when transmit resources are available.
2877 */
2878 static inline void netif_wake_queue(struct net_device *dev)
2879 {
2880 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2881 }
2882
2883 static inline void netif_tx_wake_all_queues(struct net_device *dev)
2884 {
2885 unsigned int i;
2886
2887 for (i = 0; i < dev->num_tx_queues; i++) {
2888 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2889 netif_tx_wake_queue(txq);
2890 }
2891 }
2892
2893 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2894 {
2895 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2896 }
2897
2898 /**
2899 * netif_stop_queue - stop transmitted packets
2900 * @dev: network device
2901 *
2902 * Stop upper layers calling the device hard_start_xmit routine.
2903 * Used for flow control when transmit resources are unavailable.
2904 */
2905 static inline void netif_stop_queue(struct net_device *dev)
2906 {
2907 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2908 }
2909
2910 void netif_tx_stop_all_queues(struct net_device *dev);
2911
2912 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2913 {
2914 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2915 }
2916
2917 /**
2918 * netif_queue_stopped - test if transmit queue is flowblocked
2919 * @dev: network device
2920 *
2921 * Test if transmit queue on device is currently unable to send.
2922 */
2923 static inline bool netif_queue_stopped(const struct net_device *dev)
2924 {
2925 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2926 }
2927
2928 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2929 {
2930 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2931 }
2932
2933 static inline bool
2934 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2935 {
2936 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2937 }
2938
2939 static inline bool
2940 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2941 {
2942 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2943 }
2944
2945 /**
2946 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
2947 * @dev_queue: pointer to transmit queue
2948 *
2949 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
2950 * to give appropriate hint to the CPU.
2951 */
2952 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2953 {
2954 #ifdef CONFIG_BQL
2955 prefetchw(&dev_queue->dql.num_queued);
2956 #endif
2957 }
2958
2959 /**
2960 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
2961 * @dev_queue: pointer to transmit queue
2962 *
2963 * BQL enabled drivers might use this helper in their TX completion path,
2964 * to give appropriate hint to the CPU.
2965 */
2966 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2967 {
2968 #ifdef CONFIG_BQL
2969 prefetchw(&dev_queue->dql.limit);
2970 #endif
2971 }
2972
2973 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2974 unsigned int bytes)
2975 {
2976 #ifdef CONFIG_BQL
2977 dql_queued(&dev_queue->dql, bytes);
2978
2979 if (likely(dql_avail(&dev_queue->dql) >= 0))
2980 return;
2981
2982 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2983
2984 /*
2985 * The XOFF flag must be set before checking the dql_avail below,
2986 * because in netdev_tx_completed_queue we update the dql_completed
2987 * before checking the XOFF flag.
2988 */
2989 smp_mb();
2990
2991 /* check again in case another CPU has just made room avail */
2992 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2993 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2994 #endif
2995 }
2996
2997 /**
2998 * netdev_sent_queue - report the number of bytes queued to hardware
2999 * @dev: network device
3000 * @bytes: number of bytes queued to the hardware device queue
3001 *
3002 * Report the number of bytes queued for sending/completion to the network
3003 * device hardware queue. @bytes should be a good approximation and should
3004 * exactly match netdev_completed_queue() @bytes
3005 */
3006 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3007 {
3008 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3009 }
3010
3011 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3012 unsigned int pkts, unsigned int bytes)
3013 {
3014 #ifdef CONFIG_BQL
3015 if (unlikely(!bytes))
3016 return;
3017
3018 dql_completed(&dev_queue->dql, bytes);
3019
3020 /*
3021 * Without the memory barrier there is a small possiblity that
3022 * netdev_tx_sent_queue will miss the update and cause the queue to
3023 * be stopped forever
3024 */
3025 smp_mb();
3026
3027 if (dql_avail(&dev_queue->dql) < 0)
3028 return;
3029
3030 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3031 netif_schedule_queue(dev_queue);
3032 #endif
3033 }
3034
3035 /**
3036 * netdev_completed_queue - report bytes and packets completed by device
3037 * @dev: network device
3038 * @pkts: actual number of packets sent over the medium
3039 * @bytes: actual number of bytes sent over the medium
3040 *
3041 * Report the number of bytes and packets transmitted by the network device
3042 * hardware queue over the physical medium, @bytes must exactly match the
3043 * @bytes amount passed to netdev_sent_queue()
3044 */
3045 static inline void netdev_completed_queue(struct net_device *dev,
3046 unsigned int pkts, unsigned int bytes)
3047 {
3048 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3049 }
3050
3051 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3052 {
3053 #ifdef CONFIG_BQL
3054 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3055 dql_reset(&q->dql);
3056 #endif
3057 }
3058
3059 /**
3060 * netdev_reset_queue - reset the packets and bytes count of a network device
3061 * @dev_queue: network device
3062 *
3063 * Reset the bytes and packet count of a network device and clear the
3064 * software flow control OFF bit for this network device
3065 */
3066 static inline void netdev_reset_queue(struct net_device *dev_queue)
3067 {
3068 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3069 }
3070
3071 /**
3072 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3073 * @dev: network device
3074 * @queue_index: given tx queue index
3075 *
3076 * Returns 0 if given tx queue index >= number of device tx queues,
3077 * otherwise returns the originally passed tx queue index.
3078 */
3079 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3080 {
3081 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3082 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3083 dev->name, queue_index,
3084 dev->real_num_tx_queues);
3085 return 0;
3086 }
3087
3088 return queue_index;
3089 }
3090
3091 /**
3092 * netif_running - test if up
3093 * @dev: network device
3094 *
3095 * Test if the device has been brought up.
3096 */
3097 static inline bool netif_running(const struct net_device *dev)
3098 {
3099 return test_bit(__LINK_STATE_START, &dev->state);
3100 }
3101
3102 /*
3103 * Routines to manage the subqueues on a device. We only need start,
3104 * stop, and a check if it's stopped. All other device management is
3105 * done at the overall netdevice level.
3106 * Also test the device if we're multiqueue.
3107 */
3108
3109 /**
3110 * netif_start_subqueue - allow sending packets on subqueue
3111 * @dev: network device
3112 * @queue_index: sub queue index
3113 *
3114 * Start individual transmit queue of a device with multiple transmit queues.
3115 */
3116 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3117 {
3118 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3119
3120 netif_tx_start_queue(txq);
3121 }
3122
3123 /**
3124 * netif_stop_subqueue - stop sending packets on subqueue
3125 * @dev: network device
3126 * @queue_index: sub queue index
3127 *
3128 * Stop individual transmit queue of a device with multiple transmit queues.
3129 */
3130 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3131 {
3132 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3133 netif_tx_stop_queue(txq);
3134 }
3135
3136 /**
3137 * netif_subqueue_stopped - test status of subqueue
3138 * @dev: network device
3139 * @queue_index: sub queue index
3140 *
3141 * Check individual transmit queue of a device with multiple transmit queues.
3142 */
3143 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3144 u16 queue_index)
3145 {
3146 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3147
3148 return netif_tx_queue_stopped(txq);
3149 }
3150
3151 static inline bool netif_subqueue_stopped(const struct net_device *dev,
3152 struct sk_buff *skb)
3153 {
3154 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3155 }
3156
3157 void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
3158
3159 #ifdef CONFIG_XPS
3160 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3161 u16 index);
3162 #else
3163 static inline int netif_set_xps_queue(struct net_device *dev,
3164 const struct cpumask *mask,
3165 u16 index)
3166 {
3167 return 0;
3168 }
3169 #endif
3170
3171 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3172 unsigned int num_tx_queues);
3173
3174 /*
3175 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
3176 * as a distribution range limit for the returned value.
3177 */
3178 static inline u16 skb_tx_hash(const struct net_device *dev,
3179 struct sk_buff *skb)
3180 {
3181 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
3182 }
3183
3184 /**
3185 * netif_is_multiqueue - test if device has multiple transmit queues
3186 * @dev: network device
3187 *
3188 * Check if device has multiple transmit queues
3189 */
3190 static inline bool netif_is_multiqueue(const struct net_device *dev)
3191 {
3192 return dev->num_tx_queues > 1;
3193 }
3194
3195 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3196
3197 #ifdef CONFIG_SYSFS
3198 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3199 #else
3200 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3201 unsigned int rxq)
3202 {
3203 return 0;
3204 }
3205 #endif
3206
3207 #ifdef CONFIG_SYSFS
3208 static inline unsigned int get_netdev_rx_queue_index(
3209 struct netdev_rx_queue *queue)
3210 {
3211 struct net_device *dev = queue->dev;
3212 int index = queue - dev->_rx;
3213
3214 BUG_ON(index >= dev->num_rx_queues);
3215 return index;
3216 }
3217 #endif
3218
3219 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3220 int netif_get_num_default_rss_queues(void);
3221
3222 enum skb_free_reason {
3223 SKB_REASON_CONSUMED,
3224 SKB_REASON_DROPPED,
3225 };
3226
3227 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3228 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3229
3230 /*
3231 * It is not allowed to call kfree_skb() or consume_skb() from hardware
3232 * interrupt context or with hardware interrupts being disabled.
3233 * (in_irq() || irqs_disabled())
3234 *
3235 * We provide four helpers that can be used in following contexts :
3236 *
3237 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3238 * replacing kfree_skb(skb)
3239 *
3240 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3241 * Typically used in place of consume_skb(skb) in TX completion path
3242 *
3243 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3244 * replacing kfree_skb(skb)
3245 *
3246 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3247 * and consumed a packet. Used in place of consume_skb(skb)
3248 */
3249 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3250 {
3251 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3252 }
3253
3254 static inline void dev_consume_skb_irq(struct sk_buff *skb)
3255 {
3256 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3257 }
3258
3259 static inline void dev_kfree_skb_any(struct sk_buff *skb)
3260 {
3261 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3262 }
3263
3264 static inline void dev_consume_skb_any(struct sk_buff *skb)
3265 {
3266 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3267 }
3268
3269 int netif_rx(struct sk_buff *skb);
3270 int netif_rx_ni(struct sk_buff *skb);
3271 int netif_receive_skb(struct sk_buff *skb);
3272 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3273 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3274 struct sk_buff *napi_get_frags(struct napi_struct *napi);
3275 gro_result_t napi_gro_frags(struct napi_struct *napi);
3276 struct packet_offload *gro_find_receive_by_type(__be16 type);
3277 struct packet_offload *gro_find_complete_by_type(__be16 type);
3278
3279 static inline void napi_free_frags(struct napi_struct *napi)
3280 {
3281 kfree_skb(napi->skb);
3282 napi->skb = NULL;
3283 }
3284
3285 bool netdev_is_rx_handler_busy(struct net_device *dev);
3286 int netdev_rx_handler_register(struct net_device *dev,
3287 rx_handler_func_t *rx_handler,
3288 void *rx_handler_data);
3289 void netdev_rx_handler_unregister(struct net_device *dev);
3290
3291 bool dev_valid_name(const char *name);
3292 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
3293 int dev_ethtool(struct net *net, struct ifreq *);
3294 unsigned int dev_get_flags(const struct net_device *);
3295 int __dev_change_flags(struct net_device *, unsigned int flags);
3296 int dev_change_flags(struct net_device *, unsigned int);
3297 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3298 unsigned int gchanges);
3299 int dev_change_name(struct net_device *, const char *);
3300 int dev_set_alias(struct net_device *, const char *, size_t);
3301 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3302 int dev_set_mtu(struct net_device *, int);
3303 void dev_set_group(struct net_device *, int);
3304 int dev_set_mac_address(struct net_device *, struct sockaddr *);
3305 int dev_change_carrier(struct net_device *, bool new_carrier);
3306 int dev_get_phys_port_id(struct net_device *dev,
3307 struct netdev_phys_item_id *ppid);
3308 int dev_get_phys_port_name(struct net_device *dev,
3309 char *name, size_t len);
3310 int dev_change_proto_down(struct net_device *dev, bool proto_down);
3311 int dev_change_xdp_fd(struct net_device *dev, int fd);
3312 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
3313 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3314 struct netdev_queue *txq, int *ret);
3315 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3316 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3317 bool is_skb_forwardable(const struct net_device *dev,
3318 const struct sk_buff *skb);
3319
3320 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3321
3322 extern int netdev_budget;
3323
3324 /* Called by rtnetlink.c:rtnl_unlock() */
3325 void netdev_run_todo(void);
3326
3327 /**
3328 * dev_put - release reference to device
3329 * @dev: network device
3330 *
3331 * Release reference to device to allow it to be freed.
3332 */
3333 static inline void dev_put(struct net_device *dev)
3334 {
3335 this_cpu_dec(*dev->pcpu_refcnt);
3336 }
3337
3338 /**
3339 * dev_hold - get reference to device
3340 * @dev: network device
3341 *
3342 * Hold reference to device to keep it from being freed.
3343 */
3344 static inline void dev_hold(struct net_device *dev)
3345 {
3346 this_cpu_inc(*dev->pcpu_refcnt);
3347 }
3348
3349 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
3350 * and _off may be called from IRQ context, but it is caller
3351 * who is responsible for serialization of these calls.
3352 *
3353 * The name carrier is inappropriate, these functions should really be
3354 * called netif_lowerlayer_*() because they represent the state of any
3355 * kind of lower layer not just hardware media.
3356 */
3357
3358 void linkwatch_init_dev(struct net_device *dev);
3359 void linkwatch_fire_event(struct net_device *dev);
3360 void linkwatch_forget_dev(struct net_device *dev);
3361
3362 /**
3363 * netif_carrier_ok - test if carrier present
3364 * @dev: network device
3365 *
3366 * Check if carrier is present on device
3367 */
3368 static inline bool netif_carrier_ok(const struct net_device *dev)
3369 {
3370 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3371 }
3372
3373 unsigned long dev_trans_start(struct net_device *dev);
3374
3375 void __netdev_watchdog_up(struct net_device *dev);
3376
3377 void netif_carrier_on(struct net_device *dev);
3378
3379 void netif_carrier_off(struct net_device *dev);
3380
3381 /**
3382 * netif_dormant_on - mark device as dormant.
3383 * @dev: network device
3384 *
3385 * Mark device as dormant (as per RFC2863).
3386 *
3387 * The dormant state indicates that the relevant interface is not
3388 * actually in a condition to pass packets (i.e., it is not 'up') but is
3389 * in a "pending" state, waiting for some external event. For "on-
3390 * demand" interfaces, this new state identifies the situation where the
3391 * interface is waiting for events to place it in the up state.
3392 */
3393 static inline void netif_dormant_on(struct net_device *dev)
3394 {
3395 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3396 linkwatch_fire_event(dev);
3397 }
3398
3399 /**
3400 * netif_dormant_off - set device as not dormant.
3401 * @dev: network device
3402 *
3403 * Device is not in dormant state.
3404 */
3405 static inline void netif_dormant_off(struct net_device *dev)
3406 {
3407 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3408 linkwatch_fire_event(dev);
3409 }
3410
3411 /**
3412 * netif_dormant - test if carrier present
3413 * @dev: network device
3414 *
3415 * Check if carrier is present on device
3416 */
3417 static inline bool netif_dormant(const struct net_device *dev)
3418 {
3419 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3420 }
3421
3422
3423 /**
3424 * netif_oper_up - test if device is operational
3425 * @dev: network device
3426 *
3427 * Check if carrier is operational
3428 */
3429 static inline bool netif_oper_up(const struct net_device *dev)
3430 {
3431 return (dev->operstate == IF_OPER_UP ||
3432 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3433 }
3434
3435 /**
3436 * netif_device_present - is device available or removed
3437 * @dev: network device
3438 *
3439 * Check if device has not been removed from system.
3440 */
3441 static inline bool netif_device_present(struct net_device *dev)
3442 {
3443 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3444 }
3445
3446 void netif_device_detach(struct net_device *dev);
3447
3448 void netif_device_attach(struct net_device *dev);
3449
3450 /*
3451 * Network interface message level settings
3452 */
3453
3454 enum {
3455 NETIF_MSG_DRV = 0x0001,
3456 NETIF_MSG_PROBE = 0x0002,
3457 NETIF_MSG_LINK = 0x0004,
3458 NETIF_MSG_TIMER = 0x0008,
3459 NETIF_MSG_IFDOWN = 0x0010,
3460 NETIF_MSG_IFUP = 0x0020,
3461 NETIF_MSG_RX_ERR = 0x0040,
3462 NETIF_MSG_TX_ERR = 0x0080,
3463 NETIF_MSG_TX_QUEUED = 0x0100,
3464 NETIF_MSG_INTR = 0x0200,
3465 NETIF_MSG_TX_DONE = 0x0400,
3466 NETIF_MSG_RX_STATUS = 0x0800,
3467 NETIF_MSG_PKTDATA = 0x1000,
3468 NETIF_MSG_HW = 0x2000,
3469 NETIF_MSG_WOL = 0x4000,
3470 };
3471
3472 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3473 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3474 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3475 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3476 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3477 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3478 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3479 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3480 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3481 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3482 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3483 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3484 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3485 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3486 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3487
3488 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3489 {
3490 /* use default */
3491 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3492 return default_msg_enable_bits;
3493 if (debug_value == 0) /* no output */
3494 return 0;
3495 /* set low N bits */
3496 return (1 << debug_value) - 1;
3497 }
3498
3499 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3500 {
3501 spin_lock(&txq->_xmit_lock);
3502 txq->xmit_lock_owner = cpu;
3503 }
3504
3505 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3506 {
3507 spin_lock_bh(&txq->_xmit_lock);
3508 txq->xmit_lock_owner = smp_processor_id();
3509 }
3510
3511 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
3512 {
3513 bool ok = spin_trylock(&txq->_xmit_lock);
3514 if (likely(ok))
3515 txq->xmit_lock_owner = smp_processor_id();
3516 return ok;
3517 }
3518
3519 static inline void __netif_tx_unlock(struct netdev_queue *txq)
3520 {
3521 txq->xmit_lock_owner = -1;
3522 spin_unlock(&txq->_xmit_lock);
3523 }
3524
3525 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3526 {
3527 txq->xmit_lock_owner = -1;
3528 spin_unlock_bh(&txq->_xmit_lock);
3529 }
3530
3531 static inline void txq_trans_update(struct netdev_queue *txq)
3532 {
3533 if (txq->xmit_lock_owner != -1)
3534 txq->trans_start = jiffies;
3535 }
3536
3537 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
3538 static inline void netif_trans_update(struct net_device *dev)
3539 {
3540 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3541
3542 if (txq->trans_start != jiffies)
3543 txq->trans_start = jiffies;
3544 }
3545
3546 /**
3547 * netif_tx_lock - grab network device transmit lock
3548 * @dev: network device
3549 *
3550 * Get network device transmit lock
3551 */
3552 static inline void netif_tx_lock(struct net_device *dev)
3553 {
3554 unsigned int i;
3555 int cpu;
3556
3557 spin_lock(&dev->tx_global_lock);
3558 cpu = smp_processor_id();
3559 for (i = 0; i < dev->num_tx_queues; i++) {
3560 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3561
3562 /* We are the only thread of execution doing a
3563 * freeze, but we have to grab the _xmit_lock in
3564 * order to synchronize with threads which are in
3565 * the ->hard_start_xmit() handler and already
3566 * checked the frozen bit.
3567 */
3568 __netif_tx_lock(txq, cpu);
3569 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3570 __netif_tx_unlock(txq);
3571 }
3572 }
3573
3574 static inline void netif_tx_lock_bh(struct net_device *dev)
3575 {
3576 local_bh_disable();
3577 netif_tx_lock(dev);
3578 }
3579
3580 static inline void netif_tx_unlock(struct net_device *dev)
3581 {
3582 unsigned int i;
3583
3584 for (i = 0; i < dev->num_tx_queues; i++) {
3585 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3586
3587 /* No need to grab the _xmit_lock here. If the
3588 * queue is not stopped for another reason, we
3589 * force a schedule.
3590 */
3591 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
3592 netif_schedule_queue(txq);
3593 }
3594 spin_unlock(&dev->tx_global_lock);
3595 }
3596
3597 static inline void netif_tx_unlock_bh(struct net_device *dev)
3598 {
3599 netif_tx_unlock(dev);
3600 local_bh_enable();
3601 }
3602
3603 #define HARD_TX_LOCK(dev, txq, cpu) { \
3604 if ((dev->features & NETIF_F_LLTX) == 0) { \
3605 __netif_tx_lock(txq, cpu); \
3606 } \
3607 }
3608
3609 #define HARD_TX_TRYLOCK(dev, txq) \
3610 (((dev->features & NETIF_F_LLTX) == 0) ? \
3611 __netif_tx_trylock(txq) : \
3612 true )
3613
3614 #define HARD_TX_UNLOCK(dev, txq) { \
3615 if ((dev->features & NETIF_F_LLTX) == 0) { \
3616 __netif_tx_unlock(txq); \
3617 } \
3618 }
3619
3620 static inline void netif_tx_disable(struct net_device *dev)
3621 {
3622 unsigned int i;
3623 int cpu;
3624
3625 local_bh_disable();
3626 cpu = smp_processor_id();
3627 for (i = 0; i < dev->num_tx_queues; i++) {
3628 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3629
3630 __netif_tx_lock(txq, cpu);
3631 netif_tx_stop_queue(txq);
3632 __netif_tx_unlock(txq);
3633 }
3634 local_bh_enable();
3635 }
3636
3637 static inline void netif_addr_lock(struct net_device *dev)
3638 {
3639 spin_lock(&dev->addr_list_lock);
3640 }
3641
3642 static inline void netif_addr_lock_nested(struct net_device *dev)
3643 {
3644 int subclass = SINGLE_DEPTH_NESTING;
3645
3646 if (dev->netdev_ops->ndo_get_lock_subclass)
3647 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3648
3649 spin_lock_nested(&dev->addr_list_lock, subclass);
3650 }
3651
3652 static inline void netif_addr_lock_bh(struct net_device *dev)
3653 {
3654 spin_lock_bh(&dev->addr_list_lock);
3655 }
3656
3657 static inline void netif_addr_unlock(struct net_device *dev)
3658 {
3659 spin_unlock(&dev->addr_list_lock);
3660 }
3661
3662 static inline void netif_addr_unlock_bh(struct net_device *dev)
3663 {
3664 spin_unlock_bh(&dev->addr_list_lock);
3665 }
3666
3667 /*
3668 * dev_addrs walker. Should be used only for read access. Call with
3669 * rcu_read_lock held.
3670 */
3671 #define for_each_dev_addr(dev, ha) \
3672 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
3673
3674 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
3675
3676 void ether_setup(struct net_device *dev);
3677
3678 /* Support for loadable net-drivers */
3679 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3680 unsigned char name_assign_type,
3681 void (*setup)(struct net_device *),
3682 unsigned int txqs, unsigned int rxqs);
3683 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3684 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3685
3686 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
3687 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
3688 count)
3689
3690 int register_netdev(struct net_device *dev);
3691 void unregister_netdev(struct net_device *dev);
3692
3693 /* General hardware address lists handling functions */
3694 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3695 struct netdev_hw_addr_list *from_list, int addr_len);
3696 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3697 struct netdev_hw_addr_list *from_list, int addr_len);
3698 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3699 struct net_device *dev,
3700 int (*sync)(struct net_device *, const unsigned char *),
3701 int (*unsync)(struct net_device *,
3702 const unsigned char *));
3703 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3704 struct net_device *dev,
3705 int (*unsync)(struct net_device *,
3706 const unsigned char *));
3707 void __hw_addr_init(struct netdev_hw_addr_list *list);
3708
3709 /* Functions used for device addresses handling */
3710 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3711 unsigned char addr_type);
3712 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3713 unsigned char addr_type);
3714 void dev_addr_flush(struct net_device *dev);
3715 int dev_addr_init(struct net_device *dev);
3716
3717 /* Functions used for unicast addresses handling */
3718 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3719 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3720 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3721 int dev_uc_sync(struct net_device *to, struct net_device *from);
3722 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3723 void dev_uc_unsync(struct net_device *to, struct net_device *from);
3724 void dev_uc_flush(struct net_device *dev);
3725 void dev_uc_init(struct net_device *dev);
3726
3727 /**
3728 * __dev_uc_sync - Synchonize device's unicast list
3729 * @dev: device to sync
3730 * @sync: function to call if address should be added
3731 * @unsync: function to call if address should be removed
3732 *
3733 * Add newly added addresses to the interface, and release
3734 * addresses that have been deleted.
3735 */
3736 static inline int __dev_uc_sync(struct net_device *dev,
3737 int (*sync)(struct net_device *,
3738 const unsigned char *),
3739 int (*unsync)(struct net_device *,
3740 const unsigned char *))
3741 {
3742 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3743 }
3744
3745 /**
3746 * __dev_uc_unsync - Remove synchronized addresses from device
3747 * @dev: device to sync
3748 * @unsync: function to call if address should be removed
3749 *
3750 * Remove all addresses that were added to the device by dev_uc_sync().
3751 */
3752 static inline void __dev_uc_unsync(struct net_device *dev,
3753 int (*unsync)(struct net_device *,
3754 const unsigned char *))
3755 {
3756 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3757 }
3758
3759 /* Functions used for multicast addresses handling */
3760 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3761 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3762 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3763 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3764 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3765 int dev_mc_sync(struct net_device *to, struct net_device *from);
3766 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3767 void dev_mc_unsync(struct net_device *to, struct net_device *from);
3768 void dev_mc_flush(struct net_device *dev);
3769 void dev_mc_init(struct net_device *dev);
3770
3771 /**
3772 * __dev_mc_sync - Synchonize device's multicast list
3773 * @dev: device to sync
3774 * @sync: function to call if address should be added
3775 * @unsync: function to call if address should be removed
3776 *
3777 * Add newly added addresses to the interface, and release
3778 * addresses that have been deleted.
3779 */
3780 static inline int __dev_mc_sync(struct net_device *dev,
3781 int (*sync)(struct net_device *,
3782 const unsigned char *),
3783 int (*unsync)(struct net_device *,
3784 const unsigned char *))
3785 {
3786 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3787 }
3788
3789 /**
3790 * __dev_mc_unsync - Remove synchronized addresses from device
3791 * @dev: device to sync
3792 * @unsync: function to call if address should be removed
3793 *
3794 * Remove all addresses that were added to the device by dev_mc_sync().
3795 */
3796 static inline void __dev_mc_unsync(struct net_device *dev,
3797 int (*unsync)(struct net_device *,
3798 const unsigned char *))
3799 {
3800 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3801 }
3802
3803 /* Functions used for secondary unicast and multicast support */
3804 void dev_set_rx_mode(struct net_device *dev);
3805 void __dev_set_rx_mode(struct net_device *dev);
3806 int dev_set_promiscuity(struct net_device *dev, int inc);
3807 int dev_set_allmulti(struct net_device *dev, int inc);
3808 void netdev_state_change(struct net_device *dev);
3809 void netdev_notify_peers(struct net_device *dev);
3810 void netdev_features_change(struct net_device *dev);
3811 /* Load a device via the kmod */
3812 void dev_load(struct net *net, const char *name);
3813 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3814 struct rtnl_link_stats64 *storage);
3815 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3816 const struct net_device_stats *netdev_stats);
3817
3818 extern int netdev_max_backlog;
3819 extern int netdev_tstamp_prequeue;
3820 extern int weight_p;
3821
3822 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3823 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3824 struct list_head **iter);
3825 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3826 struct list_head **iter);
3827
3828 /* iterate through upper list, must be called under RCU read lock */
3829 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3830 for (iter = &(dev)->adj_list.upper, \
3831 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3832 updev; \
3833 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3834
3835 /* iterate through upper list, must be called under RCU read lock */
3836 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
3837 for (iter = &(dev)->all_adj_list.upper, \
3838 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
3839 updev; \
3840 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
3841
3842 void *netdev_lower_get_next_private(struct net_device *dev,
3843 struct list_head **iter);
3844 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3845 struct list_head **iter);
3846
3847 #define netdev_for_each_lower_private(dev, priv, iter) \
3848 for (iter = (dev)->adj_list.lower.next, \
3849 priv = netdev_lower_get_next_private(dev, &(iter)); \
3850 priv; \
3851 priv = netdev_lower_get_next_private(dev, &(iter)))
3852
3853 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3854 for (iter = &(dev)->adj_list.lower, \
3855 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3856 priv; \
3857 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3858
3859 void *netdev_lower_get_next(struct net_device *dev,
3860 struct list_head **iter);
3861
3862 #define netdev_for_each_lower_dev(dev, ldev, iter) \
3863 for (iter = (dev)->adj_list.lower.next, \
3864 ldev = netdev_lower_get_next(dev, &(iter)); \
3865 ldev; \
3866 ldev = netdev_lower_get_next(dev, &(iter)))
3867
3868 struct net_device *netdev_all_lower_get_next(struct net_device *dev,
3869 struct list_head **iter);
3870 struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
3871 struct list_head **iter);
3872
3873 #define netdev_for_each_all_lower_dev(dev, ldev, iter) \
3874 for (iter = (dev)->all_adj_list.lower.next, \
3875 ldev = netdev_all_lower_get_next(dev, &(iter)); \
3876 ldev; \
3877 ldev = netdev_all_lower_get_next(dev, &(iter)))
3878
3879 #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
3880 for (iter = (dev)->all_adj_list.lower.next, \
3881 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
3882 ldev; \
3883 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
3884
3885 void *netdev_adjacent_get_private(struct list_head *adj_list);
3886 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3887 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3888 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3889 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3890 int netdev_master_upper_dev_link(struct net_device *dev,
3891 struct net_device *upper_dev,
3892 void *upper_priv, void *upper_info);
3893 void netdev_upper_dev_unlink(struct net_device *dev,
3894 struct net_device *upper_dev);
3895 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3896 void *netdev_lower_dev_get_private(struct net_device *dev,
3897 struct net_device *lower_dev);
3898 void netdev_lower_state_changed(struct net_device *lower_dev,
3899 void *lower_state_info);
3900 int netdev_default_l2upper_neigh_construct(struct net_device *dev,
3901 struct neighbour *n);
3902 void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
3903 struct neighbour *n);
3904
3905 /* RSS keys are 40 or 52 bytes long */
3906 #define NETDEV_RSS_KEY_LEN 52
3907 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
3908 void netdev_rss_key_fill(void *buffer, size_t len);
3909
3910 int dev_get_nest_level(struct net_device *dev);
3911 int skb_checksum_help(struct sk_buff *skb);
3912 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3913 netdev_features_t features, bool tx_path);
3914 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3915 netdev_features_t features);
3916
3917 struct netdev_bonding_info {
3918 ifslave slave;
3919 ifbond master;
3920 };
3921
3922 struct netdev_notifier_bonding_info {
3923 struct netdev_notifier_info info; /* must be first */
3924 struct netdev_bonding_info bonding_info;
3925 };
3926
3927 void netdev_bonding_info_change(struct net_device *dev,
3928 struct netdev_bonding_info *bonding_info);
3929
3930 static inline
3931 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3932 {
3933 return __skb_gso_segment(skb, features, true);
3934 }
3935 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3936
3937 static inline bool can_checksum_protocol(netdev_features_t features,
3938 __be16 protocol)
3939 {
3940 if (protocol == htons(ETH_P_FCOE))
3941 return !!(features & NETIF_F_FCOE_CRC);
3942
3943 /* Assume this is an IP checksum (not SCTP CRC) */
3944
3945 if (features & NETIF_F_HW_CSUM) {
3946 /* Can checksum everything */
3947 return true;
3948 }
3949
3950 switch (protocol) {
3951 case htons(ETH_P_IP):
3952 return !!(features & NETIF_F_IP_CSUM);
3953 case htons(ETH_P_IPV6):
3954 return !!(features & NETIF_F_IPV6_CSUM);
3955 default:
3956 return false;
3957 }
3958 }
3959
3960 /* Map an ethertype into IP protocol if possible */
3961 static inline int eproto_to_ipproto(int eproto)
3962 {
3963 switch (eproto) {
3964 case htons(ETH_P_IP):
3965 return IPPROTO_IP;
3966 case htons(ETH_P_IPV6):
3967 return IPPROTO_IPV6;
3968 default:
3969 return -1;
3970 }
3971 }
3972
3973 #ifdef CONFIG_BUG
3974 void netdev_rx_csum_fault(struct net_device *dev);
3975 #else
3976 static inline void netdev_rx_csum_fault(struct net_device *dev)
3977 {
3978 }
3979 #endif
3980 /* rx skb timestamps */
3981 void net_enable_timestamp(void);
3982 void net_disable_timestamp(void);
3983
3984 #ifdef CONFIG_PROC_FS
3985 int __init dev_proc_init(void);
3986 #else
3987 #define dev_proc_init() 0
3988 #endif
3989
3990 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3991 struct sk_buff *skb, struct net_device *dev,
3992 bool more)
3993 {
3994 skb->xmit_more = more ? 1 : 0;
3995 return ops->ndo_start_xmit(skb, dev);
3996 }
3997
3998 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
3999 struct netdev_queue *txq, bool more)
4000 {
4001 const struct net_device_ops *ops = dev->netdev_ops;
4002 int rc;
4003
4004 rc = __netdev_start_xmit(ops, skb, dev, more);
4005 if (rc == NETDEV_TX_OK)
4006 txq_trans_update(txq);
4007
4008 return rc;
4009 }
4010
4011 int netdev_class_create_file_ns(struct class_attribute *class_attr,
4012 const void *ns);
4013 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
4014 const void *ns);
4015
4016 static inline int netdev_class_create_file(struct class_attribute *class_attr)
4017 {
4018 return netdev_class_create_file_ns(class_attr, NULL);
4019 }
4020
4021 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
4022 {
4023 netdev_class_remove_file_ns(class_attr, NULL);
4024 }
4025
4026 extern struct kobj_ns_type_operations net_ns_type_operations;
4027
4028 const char *netdev_drivername(const struct net_device *dev);
4029
4030 void linkwatch_run_queue(void);
4031
4032 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4033 netdev_features_t f2)
4034 {
4035 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4036 if (f1 & NETIF_F_HW_CSUM)
4037 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4038 else
4039 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4040 }
4041
4042 return f1 & f2;
4043 }
4044
4045 static inline netdev_features_t netdev_get_wanted_features(
4046 struct net_device *dev)
4047 {
4048 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4049 }
4050 netdev_features_t netdev_increment_features(netdev_features_t all,
4051 netdev_features_t one, netdev_features_t mask);
4052
4053 /* Allow TSO being used on stacked device :
4054 * Performing the GSO segmentation before last device
4055 * is a performance improvement.
4056 */
4057 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4058 netdev_features_t mask)
4059 {
4060 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4061 }
4062
4063 int __netdev_update_features(struct net_device *dev);
4064 void netdev_update_features(struct net_device *dev);
4065 void netdev_change_features(struct net_device *dev);
4066
4067 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4068 struct net_device *dev);
4069
4070 netdev_features_t passthru_features_check(struct sk_buff *skb,
4071 struct net_device *dev,
4072 netdev_features_t features);
4073 netdev_features_t netif_skb_features(struct sk_buff *skb);
4074
4075 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4076 {
4077 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4078
4079 /* check flags correspondence */
4080 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4081 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
4082 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4083 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4084 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4085 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4086 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4087 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4088 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4089 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4090 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4091 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4092 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4093 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4094 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4095 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4096
4097 return (features & feature) == feature;
4098 }
4099
4100 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4101 {
4102 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4103 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4104 }
4105
4106 static inline bool netif_needs_gso(struct sk_buff *skb,
4107 netdev_features_t features)
4108 {
4109 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4110 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4111 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4112 }
4113
4114 static inline void netif_set_gso_max_size(struct net_device *dev,
4115 unsigned int size)
4116 {
4117 dev->gso_max_size = size;
4118 }
4119
4120 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4121 int pulled_hlen, u16 mac_offset,
4122 int mac_len)
4123 {
4124 skb->protocol = protocol;
4125 skb->encapsulation = 1;
4126 skb_push(skb, pulled_hlen);
4127 skb_reset_transport_header(skb);
4128 skb->mac_header = mac_offset;
4129 skb->network_header = skb->mac_header + mac_len;
4130 skb->mac_len = mac_len;
4131 }
4132
4133 static inline bool netif_is_macsec(const struct net_device *dev)
4134 {
4135 return dev->priv_flags & IFF_MACSEC;
4136 }
4137
4138 static inline bool netif_is_macvlan(const struct net_device *dev)
4139 {
4140 return dev->priv_flags & IFF_MACVLAN;
4141 }
4142
4143 static inline bool netif_is_macvlan_port(const struct net_device *dev)
4144 {
4145 return dev->priv_flags & IFF_MACVLAN_PORT;
4146 }
4147
4148 static inline bool netif_is_ipvlan(const struct net_device *dev)
4149 {
4150 return dev->priv_flags & IFF_IPVLAN_SLAVE;
4151 }
4152
4153 static inline bool netif_is_ipvlan_port(const struct net_device *dev)
4154 {
4155 return dev->priv_flags & IFF_IPVLAN_MASTER;
4156 }
4157
4158 static inline bool netif_is_bond_master(const struct net_device *dev)
4159 {
4160 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4161 }
4162
4163 static inline bool netif_is_bond_slave(const struct net_device *dev)
4164 {
4165 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4166 }
4167
4168 static inline bool netif_supports_nofcs(struct net_device *dev)
4169 {
4170 return dev->priv_flags & IFF_SUPP_NOFCS;
4171 }
4172
4173 static inline bool netif_is_l3_master(const struct net_device *dev)
4174 {
4175 return dev->priv_flags & IFF_L3MDEV_MASTER;
4176 }
4177
4178 static inline bool netif_is_l3_slave(const struct net_device *dev)
4179 {
4180 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4181 }
4182
4183 static inline bool netif_is_bridge_master(const struct net_device *dev)
4184 {
4185 return dev->priv_flags & IFF_EBRIDGE;
4186 }
4187
4188 static inline bool netif_is_bridge_port(const struct net_device *dev)
4189 {
4190 return dev->priv_flags & IFF_BRIDGE_PORT;
4191 }
4192
4193 static inline bool netif_is_ovs_master(const struct net_device *dev)
4194 {
4195 return dev->priv_flags & IFF_OPENVSWITCH;
4196 }
4197
4198 static inline bool netif_is_team_master(const struct net_device *dev)
4199 {
4200 return dev->priv_flags & IFF_TEAM;
4201 }
4202
4203 static inline bool netif_is_team_port(const struct net_device *dev)
4204 {
4205 return dev->priv_flags & IFF_TEAM_PORT;
4206 }
4207
4208 static inline bool netif_is_lag_master(const struct net_device *dev)
4209 {
4210 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4211 }
4212
4213 static inline bool netif_is_lag_port(const struct net_device *dev)
4214 {
4215 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4216 }
4217
4218 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4219 {
4220 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4221 }
4222
4223 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4224 static inline void netif_keep_dst(struct net_device *dev)
4225 {
4226 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4227 }
4228
4229 /* return true if dev can't cope with mtu frames that need vlan tag insertion */
4230 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4231 {
4232 /* TODO: reserve and use an additional IFF bit, if we get more users */
4233 return dev->priv_flags & IFF_MACSEC;
4234 }
4235
4236 extern struct pernet_operations __net_initdata loopback_net_ops;
4237
4238 /* Logging, debugging and troubleshooting/diagnostic helpers. */
4239
4240 /* netdev_printk helpers, similar to dev_printk */
4241
4242 static inline const char *netdev_name(const struct net_device *dev)
4243 {
4244 if (!dev->name[0] || strchr(dev->name, '%'))
4245 return "(unnamed net_device)";
4246 return dev->name;
4247 }
4248
4249 static inline const char *netdev_reg_state(const struct net_device *dev)
4250 {
4251 switch (dev->reg_state) {
4252 case NETREG_UNINITIALIZED: return " (uninitialized)";
4253 case NETREG_REGISTERED: return "";
4254 case NETREG_UNREGISTERING: return " (unregistering)";
4255 case NETREG_UNREGISTERED: return " (unregistered)";
4256 case NETREG_RELEASED: return " (released)";
4257 case NETREG_DUMMY: return " (dummy)";
4258 }
4259
4260 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
4261 return " (unknown)";
4262 }
4263
4264 __printf(3, 4)
4265 void netdev_printk(const char *level, const struct net_device *dev,
4266 const char *format, ...);
4267 __printf(2, 3)
4268 void netdev_emerg(const struct net_device *dev, const char *format, ...);
4269 __printf(2, 3)
4270 void netdev_alert(const struct net_device *dev, const char *format, ...);
4271 __printf(2, 3)
4272 void netdev_crit(const struct net_device *dev, const char *format, ...);
4273 __printf(2, 3)
4274 void netdev_err(const struct net_device *dev, const char *format, ...);
4275 __printf(2, 3)
4276 void netdev_warn(const struct net_device *dev, const char *format, ...);
4277 __printf(2, 3)
4278 void netdev_notice(const struct net_device *dev, const char *format, ...);
4279 __printf(2, 3)
4280 void netdev_info(const struct net_device *dev, const char *format, ...);
4281
4282 #define MODULE_ALIAS_NETDEV(device) \
4283 MODULE_ALIAS("netdev-" device)
4284
4285 #if defined(CONFIG_DYNAMIC_DEBUG)
4286 #define netdev_dbg(__dev, format, args...) \
4287 do { \
4288 dynamic_netdev_dbg(__dev, format, ##args); \
4289 } while (0)
4290 #elif defined(DEBUG)
4291 #define netdev_dbg(__dev, format, args...) \
4292 netdev_printk(KERN_DEBUG, __dev, format, ##args)
4293 #else
4294 #define netdev_dbg(__dev, format, args...) \
4295 ({ \
4296 if (0) \
4297 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
4298 })
4299 #endif
4300
4301 #if defined(VERBOSE_DEBUG)
4302 #define netdev_vdbg netdev_dbg
4303 #else
4304
4305 #define netdev_vdbg(dev, format, args...) \
4306 ({ \
4307 if (0) \
4308 netdev_printk(KERN_DEBUG, dev, format, ##args); \
4309 0; \
4310 })
4311 #endif
4312
4313 /*
4314 * netdev_WARN() acts like dev_printk(), but with the key difference
4315 * of using a WARN/WARN_ON to get the message out, including the
4316 * file/line information and a backtrace.
4317 */
4318 #define netdev_WARN(dev, format, args...) \
4319 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
4320 netdev_reg_state(dev), ##args)
4321
4322 /* netif printk helpers, similar to netdev_printk */
4323
4324 #define netif_printk(priv, type, level, dev, fmt, args...) \
4325 do { \
4326 if (netif_msg_##type(priv)) \
4327 netdev_printk(level, (dev), fmt, ##args); \
4328 } while (0)
4329
4330 #define netif_level(level, priv, type, dev, fmt, args...) \
4331 do { \
4332 if (netif_msg_##type(priv)) \
4333 netdev_##level(dev, fmt, ##args); \
4334 } while (0)
4335
4336 #define netif_emerg(priv, type, dev, fmt, args...) \
4337 netif_level(emerg, priv, type, dev, fmt, ##args)
4338 #define netif_alert(priv, type, dev, fmt, args...) \
4339 netif_level(alert, priv, type, dev, fmt, ##args)
4340 #define netif_crit(priv, type, dev, fmt, args...) \
4341 netif_level(crit, priv, type, dev, fmt, ##args)
4342 #define netif_err(priv, type, dev, fmt, args...) \
4343 netif_level(err, priv, type, dev, fmt, ##args)
4344 #define netif_warn(priv, type, dev, fmt, args...) \
4345 netif_level(warn, priv, type, dev, fmt, ##args)
4346 #define netif_notice(priv, type, dev, fmt, args...) \
4347 netif_level(notice, priv, type, dev, fmt, ##args)
4348 #define netif_info(priv, type, dev, fmt, args...) \
4349 netif_level(info, priv, type, dev, fmt, ##args)
4350
4351 #if defined(CONFIG_DYNAMIC_DEBUG)
4352 #define netif_dbg(priv, type, netdev, format, args...) \
4353 do { \
4354 if (netif_msg_##type(priv)) \
4355 dynamic_netdev_dbg(netdev, format, ##args); \
4356 } while (0)
4357 #elif defined(DEBUG)
4358 #define netif_dbg(priv, type, dev, format, args...) \
4359 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
4360 #else
4361 #define netif_dbg(priv, type, dev, format, args...) \
4362 ({ \
4363 if (0) \
4364 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4365 0; \
4366 })
4367 #endif
4368
4369 #if defined(VERBOSE_DEBUG)
4370 #define netif_vdbg netif_dbg
4371 #else
4372 #define netif_vdbg(priv, type, dev, format, args...) \
4373 ({ \
4374 if (0) \
4375 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4376 0; \
4377 })
4378 #endif
4379
4380 /*
4381 * The list of packet types we will receive (as opposed to discard)
4382 * and the routines to invoke.
4383 *
4384 * Why 16. Because with 16 the only overlap we get on a hash of the
4385 * low nibble of the protocol value is RARP/SNAP/X.25.
4386 *
4387 * NOTE: That is no longer true with the addition of VLAN tags. Not
4388 * sure which should go first, but I bet it won't make much
4389 * difference if we are running VLANs. The good news is that
4390 * this protocol won't be in the list unless compiled in, so
4391 * the average user (w/out VLANs) will not be adversely affected.
4392 * --BLG
4393 *
4394 * 0800 IP
4395 * 8100 802.1Q VLAN
4396 * 0001 802.3
4397 * 0002 AX.25
4398 * 0004 802.2
4399 * 8035 RARP
4400 * 0005 SNAP
4401 * 0805 X.25
4402 * 0806 ARP
4403 * 8137 IPX
4404 * 0009 Localtalk
4405 * 86DD IPv6
4406 */
4407 #define PTYPE_HASH_SIZE (16)
4408 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
4409
4410 #endif /* _LINUX_NETDEVICE_H */