/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall, <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dcbnl.h>
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);
/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from other device */
#define NET_ADDR_SET		3	/* address is set using
					 * dev_set_mac_address() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
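
/*
 * Example (editorial sketch, not part of the original header): a caller that
 * wants to treat congestion notification as success can map the qdisc return
 * code with net_xmit_eval().  The foo_ counters are hypothetical.
 *
 *	int rc = dev_queue_xmit(skb);
 *
 *	if (net_xmit_eval(rc) == 0)
 *		foo_stats_tx_ok(priv);
 *	else
 *		foo_stats_tx_dropped(priv);
 */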
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
#include <linux/cache.h>
#include <linux/skbuff.h>

#include <linux/static_key.h>
extern struct static_key rps_needed;
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
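
/*
 * Example (editorial sketch, not part of the original header): walking the
 * multicast list from a driver's ndo_set_rx_mode callback.  The foo_ helper
 * is hypothetical.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hw_add_mc_filter(dev, ha->addr);
 *	}
 *
 * The list is protected by dev->addr_list_lock, which the core holds while
 * calling ndo_set_rx_mode.
 */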
struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
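
/*
 * Example (editorial sketch, not part of the original header): a protocol
 * that builds its own output skb typically reserves link-layer headroom
 * with LL_RESERVED_SPACE() before adding its payload:
 *
 *	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *
 * The reserved headroom lets dev_hard_header() prepend the hardware header
 * without a reallocation.
 */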
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};
enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

typedef enum gro_result gro_result_t;
/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
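
/*
 * Example (editorial sketch, not part of the original header): a minimal
 * rx_handler that diverts traffic to an upper device, in the style of
 * bonding/bridging.  The foo_ helpers are hypothetical.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct net_device *upper = foo_get_upper_dev(skb->dev);
 *
 *		if (!upper)
 *			return RX_HANDLER_PASS;
 *
 *		skb->dev = upper;
 *		*pskb = skb;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 * Registration is done under rtnl_lock() with
 * netdev_rx_handler_register(dev, foo_handle_frame, rx_handler_data).
 */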
void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
void __napi_complete(struct napi_struct *n);
void napi_complete(struct napi_struct *n);

/**
 *	napi_by_id - lookup a NAPI by napi_id
 *	@napi_id: hashed napi_id
 *
 * lookup @napi_id in napi_hash table
 * must be called under rcu_read_lock()
 */
struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi);
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
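
/*
 * Example (editorial sketch, not part of the original header): the usual
 * driver pattern is to schedule NAPI from the RX interrupt and complete it
 * from the poll callback once the budget is not exhausted.  All foo_ names
 * are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *fp = data;
 *
 *		if (napi_schedule_prep(&fp->napi)) {
 *			foo_disable_rx_irq(fp);
 *			__napi_schedule(&fp->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
 *		int work = foo_clean_rx_ring(fp, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(fp);
 *		}
 *		return work;
 *	}
 */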
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits is set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions; they should only be using netif_tx_*.
 */
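
/*
 * Example (editorial sketch, not part of the original header): a driver
 * toggles __QUEUE_STATE_DRV_XOFF through the netif_stop/wake helpers.
 * The foo_ ring checks are hypothetical.
 *
 *	In ndo_start_xmit, once the TX ring is full:
 *
 *		if (foo_tx_ring_full(priv))
 *			netif_stop_queue(dev);
 *
 *	In the TX completion path, once descriptors have been reclaimed:
 *
 *		if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *			netif_wake_queue(dev);
 */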
struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
			struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
	(nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif
#define MAX_PHYS_PORT_ID_LEN 32

/* This structure holds a unique identifier to identify the
 * physical port used by a netdevice.
 */
struct netdev_phys_port_id {
	unsigned char id[MAX_PHYS_PORT_ID_LEN];
	unsigned char id_len;
};
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when a device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous mode is
 *	enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it
 *	should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change the MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                                              struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
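 *	Example (editorial sketch, not part of the original header), a
 *	minimal @ndo_get_stats64 that fills the caller-provided structure
 *	from hypothetical per-driver counters:
 *
 *		static struct rtnl_link_stats64 *
 *		foo_get_stats64(struct net_device *dev,
 *				struct rtnl_link_stats64 *storage)
 *		{
 *			struct foo_priv *fp = netdev_priv(dev);
 *
 *			storage->rx_packets = fp->rx_packets;
 *			storage->rx_bytes   = fp->rx_bytes;
 *			storage->tx_packets = fp->tx_packets;
 *			storage->tx_bytes   = fp->tx_bytes;
 *			return storage;
 *		}
 *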
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 *
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to setup 'tc' number of traffic classes in the net device. This
 *	is always called from the stack with the rtnl lock held and netif tx
 *	queues stopped. This allows the netdevice to perform queue management
 *	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
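 *	Example (editorial sketch, not part of the original header), a
 *	hypothetical driver reacting to an RX checksum toggle:
 *
 *		static int foo_set_features(struct net_device *dev,
 *					    netdev_features_t features)
 *		{
 *			netdev_features_t changed = dev->features ^ features;
 *
 *			if (changed & NETIF_F_RXCSUM)
 *				foo_hw_set_rx_csum(dev,
 *					!!(features & NETIF_F_RXCSUM));
 *			return 0;
 *		}
 *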
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_port_id *ppid);
 *	Called to get the ID of the physical port of this device. If the
 *	driver does not implement this, it is assumed that the hw is not
 *	able to have multiple net devices on a single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
 *	a new port starts listening. The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore. The operation
 *	is protected by the vxlan_net->sock_lock.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 *				      struct net_device *dev,
 *				      void *priv);
 *	Callback to use for xmit over the accelerated station. This
 *	is used in place of ndo_start_xmit on accelerated net
 *	devices.
 */
struct net_device_ops {
	int		(*ndo_init)(struct net_device *dev);
	void		(*ndo_uninit)(struct net_device *dev);
	int		(*ndo_open)(struct net_device *dev);
	int		(*ndo_stop)(struct net_device *dev);
	netdev_tx_t	(*ndo_start_xmit) (struct sk_buff *skb,
					   struct net_device *dev);
	u16		(*ndo_select_queue)(struct net_device *dev,
					    struct sk_buff *skb,
					    void *accel_priv,
					    select_queue_fallback_t fallback);
	void		(*ndo_change_rx_flags)(struct net_device *dev,
					       int flags);
	void		(*ndo_set_rx_mode)(struct net_device *dev);
	int		(*ndo_set_mac_address)(struct net_device *dev,
					       void *addr);
	int		(*ndo_validate_addr)(struct net_device *dev);
	int		(*ndo_do_ioctl)(struct net_device *dev,
					struct ifreq *ifr, int cmd);
	int		(*ndo_set_config)(struct net_device *dev,
					  struct ifmap *map);
	int		(*ndo_change_mtu)(struct net_device *dev,
					  int new_mtu);
	int		(*ndo_neigh_setup)(struct net_device *dev,
					   struct neigh_parms *);
	void		(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int		(*ndo_vlan_rx_add_vid)(struct net_device *dev,
					       __be16 proto, u16 vid);
	int		(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
						__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void		(*ndo_poll_controller)(struct net_device *dev);
	int		(*ndo_netpoll_setup)(struct net_device *dev,
					     struct netpoll_info *info,
					     gfp_t gfp);
	void		(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int		(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int		(*ndo_set_vf_mac)(struct net_device *dev,
					  int queue, u8 *mac);
	int		(*ndo_set_vf_vlan)(struct net_device *dev,
					   int queue, u16 vlan, u8 qos);
	int		(*ndo_set_vf_tx_rate)(struct net_device *dev,
					      int vf, int rate);
	int		(*ndo_set_vf_spoofchk)(struct net_device *dev,
					       int vf, bool setting);
	int		(*ndo_get_vf_config)(struct net_device *dev,
					     int vf,
					     struct ifla_vf_info *ivf);
	int		(*ndo_set_vf_link_state)(struct net_device *dev,
						 int vf, int link_state);
	int		(*ndo_set_vf_port)(struct net_device *dev,
					   int vf,
					   struct nlattr *port[]);
	int		(*ndo_get_vf_port)(struct net_device *dev,
					   int vf, struct sk_buff *skb);
	int		(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int		(*ndo_fcoe_enable)(struct net_device *dev);
	int		(*ndo_fcoe_disable)(struct net_device *dev);
	int		(*ndo_fcoe_ddp_setup)(struct net_device *dev,
					      u16 xid,
					      struct scatterlist *sgl,
					      unsigned int sgc);
	int		(*ndo_fcoe_ddp_done)(struct net_device *dev,
					     u16 xid);
	int		(*ndo_fcoe_ddp_target)(struct net_device *dev,
					       u16 xid,
					       struct scatterlist *sgl,
					       unsigned int sgc);
	int		(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
						struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int		(*ndo_fcoe_get_wwn)(struct net_device *dev,
					    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int		(*ndo_rx_flow_steer)(struct net_device *dev,
					     const struct sk_buff *skb,
					     u16 rxq_index,
					     u32 flow_id);
#endif
	int		(*ndo_add_slave)(struct net_device *dev,
					 struct net_device *slave_dev);
	int		(*ndo_del_slave)(struct net_device *dev,
					 struct net_device *slave_dev);
	netdev_features_t (*ndo_fix_features)(struct net_device *dev,
					      netdev_features_t features);
	int		(*ndo_set_features)(struct net_device *dev,
					    netdev_features_t features);
	int		(*ndo_neigh_construct)(struct neighbour *n);
	void		(*ndo_neigh_destroy)(struct neighbour *n);

	int		(*ndo_fdb_add)(struct ndmsg *ndm,
				       struct nlattr *tb[],
				       struct net_device *dev,
				       const unsigned char *addr,
				       u16 flags);
	int		(*ndo_fdb_del)(struct ndmsg *ndm,
				       struct nlattr *tb[],
				       struct net_device *dev,
				       const unsigned char *addr);
	int		(*ndo_fdb_dump)(struct sk_buff *skb,
					struct netlink_callback *cb,
					struct net_device *dev,
					int idx);

	int		(*ndo_bridge_setlink)(struct net_device *dev,
					      struct nlmsghdr *nlh);
	int		(*ndo_bridge_getlink)(struct sk_buff *skb,
					      u32 pid, u32 seq,
					      struct net_device *dev,
					      u32 filter_mask);
	int		(*ndo_bridge_dellink)(struct net_device *dev,
					      struct nlmsghdr *nlh);
	int		(*ndo_change_carrier)(struct net_device *dev,
					      bool new_carrier);
	int		(*ndo_get_phys_port_id)(struct net_device *dev,
						struct netdev_phys_port_id *ppid);
	void		(*ndo_add_vxlan_port)(struct net_device *dev,
					      sa_family_t sa_family,
					      __be16 port);
	void		(*ndo_del_vxlan_port)(struct net_device *dev,
					      sa_family_t sa_family,
					      __be16 port);

	void*		(*ndo_dfwd_add_station)(struct net_device *pdev,
						struct net_device *dev);
	void		(*ndo_dfwd_del_station)(struct net_device *pdev,
						void *priv);

	netdev_tx_t	(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
						struct net_device *dev,
						void *priv);
};
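
/*
 * Example (editorial sketch, not part of the original header): a driver
 * typically provides one static ops table and points dev->netdev_ops at it
 * from its setup/probe routine.  The foo_ callbacks are hypothetical; the
 * eth_* helpers are the generic Ethernet ones from <linux/etherdevice.h>.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_rx_mode	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_get_stats64	= foo_get_stats64,
 *	};
 *
 *	dev->netdev_ops = &foo_netdev_ops;
 */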
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain, please keep it close to name[] */
	struct hlist_node	name_hlist;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	int			irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;

	/* directly linked devices, like slaves for bonding */
	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	/* all linked devices, *including* neighbours */
	struct {
		struct list_head upper;
		struct list_head lower;
	} all_adj_list;
	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;
	/* mask of features inherited by encapsulating devices
	 * This field indicates what encapsulation offloads
	 * the hardware is capable of doing, and drivers will
	 * need to set them appropriately.
	 */
	netdev_features_t	hw_enc_features;
	/* mask of features inheritable by MPLS */
	netdev_features_t	mpls_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network
					     * Do not use this in drivers.
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
	const struct forwarding_accel_ops *fwd_ops;
	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace.
					     * See if.h for definitions. */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP,.. */
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;
	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		neigh_priv_len;
	unsigned short		dev_id;		/* Used to differentiate devices
						 * that share the same link
						 * layer address
						 */
	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	struct netdev_hw_addr_list	dev_addrs; /* list of device
						    * hw addresses
						    */
	struct kset		*queues_kset;

	unsigned int		promiscuity;
	unsigned int		allmulti;
	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;	/* TIPC specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data	*/
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx
						 * This should not be set in
						 * drivers, unless really needed,
						 * because network stack (bonding)
						 * use it if/when necessary, to
						 * avoid dirtying this cache line.
						 */

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number.  Assigned by driver.  This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;
	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;		/* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;
	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif
	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
		struct pcpu_sw_netstats __percpu *tstats;
		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
		struct pcpu_vstats __percpu	*vstats; /* veth stats */
	};
	struct garp_port __rcu	*garp_port;
	struct mrp_port __rcu	*mrp_port;
	/* class/net/name entry */
	struct device		dev;

	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];
	/* space for optional per-rx queue attributes */
	const struct attribute_group *sysfs_rx_queue_group;

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;
#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	/* phy device may attach itself for hardware timestamping */
	struct phy_device	*phydev;

	struct lock_class_key	*qdisc_tx_busylock;

	/* group the device belongs to */
	int			group;

	struct pm_qos_request	pm_qos_req;
};
#define	to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}
static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}
static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
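
/*
 * Example (editorial sketch, not part of the original header): a
 * multiqueue driver mapping eight TX queues onto two traffic classes
 * during setup:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	TC 0 uses queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	TC 1 uses queues 4-7
 *	netdev_set_prio_tc_map(dev, 0, 0);	priority 0 -> TC 0
 *	netdev_set_prio_tc_map(dev, 5, 1);	priority 5 -> TC 1
 */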
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}
static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return false;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return false;
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
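
/*
 * Example (editorial sketch, not part of the original header): the private
 * area lives directly behind struct net_device, so a driver sizes it at
 * allocation time and reaches it through netdev_priv().  foo_priv and
 * alloc_etherdev() (from <linux/etherdevice.h>) are used for illustration.
 *
 *	struct foo_priv {
 *		struct napi_struct napi;
 *		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 *	};
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 */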
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised to not use a bigger value
 */
#define NAPI_POLL_WEIGHT 64
/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);
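
/*
 * Example (editorial sketch, not part of the original header): hooking up
 * the poll function from a hypothetical probe routine, using the default
 * weight, and removing it again on teardown:
 *
 *	netif_napi_add(dev, &fp->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	...
 *	netif_napi_del(&fp->napi);
 */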
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow;

	/* Free the skb? */
	u8	free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() */
	u16	proto;

	/* Used in udp_gro_receive */
	u8	udp_mark;

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};
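
/*
 * Example (editorial sketch, not part of the original header): a protocol
 * module registers a receive hook for one ethertype with dev_add_pack()
 * (declared further below).  ETH_P_IP is used purely for illustration and
 * the foo_rcv body is hypothetical.
 *
 *	static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
 *			   struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		...
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type foo_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = foo_rcv,
 *	};
 *
 *	dev_add_pack(&foo_packet_type);
 */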
struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

struct udp_offload {
	__be16			 port;
	struct offload_callbacks callbacks;
};
/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
};
#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER	0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007 /* notify after mtu change happened */
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER	0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL	0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
#define NETDEV_CHANGEUPPER	0x0015
#define NETDEV_RESEND_IGMP	0x0016
#define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device *dev;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
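
/*
 * Example (editorial sketch, not part of the original header): a subsystem
 * watching device state changes registers a notifier block; the callback
 * recovers the net_device from the opaque pointer with
 * netdev_notifier_info_to_dev().  The foo_ names are hypothetical.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			foo_device_up(dev);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			foo_device_going_down(dev);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&foo_netdev_notifier);
 */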
extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == bond)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
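
/*
 * Example (editorial sketch, not part of the original header): walking all
 * devices in a namespace under RCU protection:
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev) {
 *		if (dev->flags & IFF_UP)
 *			foo_handle_running_dev(dev);
 *	}
 *	rcu_read_unlock();
 *
 * The non-RCU variants must instead be called with RTNL or dev_base_lock
 * held.
 */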
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
int netdev_boot_setup_check(struct net_device *dev);
unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
					unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
1879 struct net_device
*dev_get_by_index(struct net
*net
, int ifindex
);
1880 struct net_device
*__dev_get_by_index(struct net
*net
, int ifindex
);
1881 struct net_device
*dev_get_by_index_rcu(struct net
*net
, int ifindex
);
1882 int netdev_get_name(struct net
*net
, char *name
, int ifindex
);
1883 int dev_restart(struct net_device
*dev
);
1884 #ifdef CONFIG_NETPOLL_TRAP
1885 int netpoll_trap(void);
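
/*
 * Example (not part of the original header): looking a device up by name.
 * dev_get_by_name() takes a reference that must be dropped with dev_put();
 * the _rcu variant takes no reference and is only valid inside the RCU
 * read-side section.  Sketch only.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_mtu_of(struct net *net, const char *name)
{
	struct net_device *dev;
	int mtu = -ENODEV;

	dev = dev_get_by_name(net, name);
	if (dev) {
		mtu = dev->mtu;
		dev_put(dev);
	}
	return mtu;
}
#endif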
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

static inline int dev_rebuild_header(struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->rebuild)
		return 0;
	return dev->header_ops->rebuild(skb);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */

/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}
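
/*
 * Example (not part of the original header): the usual driver flow-control
 * pattern built on the helpers above.  The ring-accounting details
 * (example_priv, example_tx_ring_space()) are made up for illustration.
 */
#if 0	/* illustrative sketch, not compiled */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* ... post skb to the hardware ring ... */

	if (example_tx_ring_space(priv) < MAX_SKB_FRAGS + 1)
		netif_stop_queue(dev);	/* no room for a worst-case packet */
	return NETDEV_TX_OK;
}

static void example_tx_complete(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* ... reclaim completed descriptors ... */

	if (netif_queue_stopped(dev) &&
	    example_tx_ring_space(priv) >= MAX_SKB_FRAGS + 1)
		netif_wake_queue(dev);	/* transmit resources available again */
}
#endif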
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/**
 *	netdev_sent_queue - report the number of bytes queued to hardware
 *	@dev: network device
 *	@bytes: number of bytes queued to the hardware device queue
 *
 *	Report the number of bytes queued for sending/completion to the network
 *	device hardware queue. @bytes should be a good approximation and should
 *	exactly match netdev_completed_queue() @bytes
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	Report the number of bytes and packets transmitted by the network device
 *	hardware queue over the physical medium, @bytes must exactly match the
 *	@bytes amount passed to netdev_sent_queue()
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev_queue: network device
 *
 *	Reset the bytes and packet count of a network device and clear the
 *	software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
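
/*
 * Example (not part of the original header): byte queue limits pairing.
 * Bytes reported by netdev_sent_queue() on the transmit path must be matched
 * exactly by netdev_completed_queue() at TX completion time, and
 * netdev_reset_queue() is called when the ring is reinitialised.  Sketch
 * only; the surrounding driver code is assumed.
 */
#if 0	/* illustrative sketch, not compiled */
	/* in ndo_start_xmit(), after the descriptor is posted: */
	netdev_sent_queue(dev, skb->len);

	/* in the TX completion handler, after reclaiming descriptors: */
	netdev_completed_queue(dev, done_pkts, done_bytes);

	/* when the TX ring is torn down or reset: */
	netdev_reset_queue(dev);
#endif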
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_RPS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
						unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	int err;

	err = netif_set_real_num_tx_queues(to_dev,
					   from_dev->real_num_tx_queues);
	if (err)
		return err;
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}

#ifdef CONFIG_RPS
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#endif

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);
enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);

/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in following contexts :
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  and consumed a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
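
/*
 * Example (not part of the original header): feeding received frames to GRO
 * from a NAPI poll routine.  The ring-walking helper is made up; only
 * napi_gro_receive() and napi_complete() are real interfaces.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct sk_buff *skb;

	while (work_done < budget &&
	       (skb = example_rx_ring_next(napi)) != NULL) {
		napi_gro_receive(napi, skb);	/* GRO merges, then passes up the stack */
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);		/* then re-enable device interrupts */
	return work_done;
}
#endif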
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
int dev_change_flags(struct net_device *, unsigned int);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
			unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int dev_set_mtu(struct net_device *, int);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_port_id *ppid);
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
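
/*
 * Example (not part of the original header): holding a device reference
 * across a sleepable section.  dev_hold() pins the device so it cannot be
 * freed; every dev_hold() must be balanced by dev_put().  Sketch only.
 */
#if 0	/* illustrative sketch, not compiled */
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);		/* keep it valid past the RCU section */
	rcu_read_unlock();

	if (dev) {
		/* ... use dev, possibly sleeping ... */
		dev_put(dev);
	}
#endif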
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);

void netif_carrier_off(struct net_device *dev);
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is in the dormant state.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);
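
/*
 * Example (not part of the original header): the usual suspend/resume use of
 * netif_device_detach()/netif_device_attach().  The dev_pm_ops wiring and the
 * hardware-specific steps are assumed, not shown.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);	/* mark absent, stop the TX queues */
	/* ... quiesce and power down the hardware ... */
	return 0;
}

static int example_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	/* ... power up and reprogram the hardware ... */
	netif_device_attach(dev);	/* mark present, wake the TX queues */
	return 0;
}
#endif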
/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
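
/*
 * Example (not part of the original header): how a driver typically wires
 * these bits up.  The module parameter name and the private structure are
 * illustrative; only netif_msg_init() and the netif_msg_*() tests are real.
 */
#if 0	/* illustrative sketch, not compiled */
static int debug = -1;			/* -1 means "use the driver default" */
module_param(debug, int, 0);

static void example_init_msg_level(struct example_priv *priv)
{
	/* priv->msg_enable is the field the netif_msg_*() macros test */
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);

	if (netif_msg_probe(priv))
		pr_info("probing, msg_enable=%#x\n", priv->msg_enable);
}
#endif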
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
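
/*
 * Example (not part of the original header): the usual lifetime of a simple
 * Ethernet driver's net_device.  example_priv and the error labels are
 * illustrative only.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct example_priv), "example%d",
			   ether_setup);
	if (!dev)
		return -ENOMEM;

	/* set dev->netdev_ops, dev->features, MAC address, ... */

	err = register_netdev(dev);	/* takes RTNL internally */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}

/* teardown order: unregister_netdev(dev); then free_netdev(dev); */
#endif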
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);
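
/*
 * Example (not part of the original header): a stacked (VLAN-like) device
 * propagating its address filters to the lower device from its
 * ndo_set_rx_mode callback.  example_lower_dev() is made up; only
 * dev_uc_sync()/dev_mc_sync() are real interfaces.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_set_rx_mode(struct net_device *dev)
{
	struct net_device *lower = example_lower_dev(dev);

	/* the core calls ndo_set_rx_mode with dev->addr_list_lock held */
	dev_uc_sync(lower, dev);	/* push secondary unicast addresses down */
	dev_mc_sync(lower, dev);	/* push multicast addresses down */
}
#endif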
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		bpf_jit_enable;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->all_adj_list.upper, \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))

void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter);

#define netdev_for_each_lower_private(dev, priv, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     priv = netdev_lower_get_next_private(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev);
int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private);
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev);
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns);

static inline int netdev_class_create_file(struct class_attribute *class_attr)
{
	return netdev_class_create_file_ns(class_attr, NULL);
}

static inline void netdev_class_remove_file(struct class_attribute *class_attr)
{
	netdev_class_remove_file_ns(class_attr, NULL);
}

extern struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

void linkwatch_run_queue(void);

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO being used on stacked device :
 * Performing the GSO segmentation before last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
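
/*
 * Example (not part of the original header): how a transmit path can use
 * netif_skb_features()/netif_needs_gso()/skb_gso_segment() to split a GSO
 * skb in software when the device cannot.  Error handling is abbreviated and
 * the 'drop' label is assumed.
 */
#if 0	/* illustrative sketch, not compiled */
	netdev_features_t features = netif_skb_features(skb);
	struct sk_buff *segs;

	if (netif_needs_gso(skb, features)) {
		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs))
			goto drop;		/* segmentation failed */
		if (segs) {
			consume_skb(skb);	/* transmit the 'segs' list instead */
			skb = segs;
		}
	}
#endif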
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

static inline bool netif_is_macvlan(struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_bond_master(struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
int netdev_emerg(const struct net_device *dev, const char *format, ...);
int netdev_alert(const struct net_device *dev, const char *format, ...);
int netdev_crit(const struct net_device *dev, const char *format, ...);
int netdev_err(const struct net_device *dev, const char *format, ...);
int netdev_warn(const struct net_device *dev, const char *format, ...);
int netdev_notice(const struct net_device *dev, const char *format, ...);
int netdev_info(const struct net_device *dev, const char *format, ...);

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else
#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16. Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 * NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *        sure which should go first, but I bet it won't make much
 *        difference if we are running VLANs.  The good news is that
 *        this protocol won't be in the list unless compiled in, so
 *        the average user (w/out VLANs) will not be adversely affected.
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

#endif	/* _LINUX_NETDEVICE_H */