]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - include/linux/netdevice.h
MAINTAINERS: Update MAX77802 PMIC entry
[mirror_ubuntu-artful-kernel.git] / include / linux / netdevice.h
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the Interfaces handler.
7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 * Moved to /usr/include/linux for NET3
24 */
25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H
27
28 #include <linux/timer.h>
29 #include <linux/bug.h>
30 #include <linux/delay.h>
31 #include <linux/atomic.h>
32 #include <linux/prefetch.h>
33 #include <asm/cache.h>
34 #include <asm/byteorder.h>
35
36 #include <linux/percpu.h>
37 #include <linux/rculist.h>
38 #include <linux/dmaengine.h>
39 #include <linux/workqueue.h>
40 #include <linux/dynamic_queue_limits.h>
41
42 #include <linux/ethtool.h>
43 #include <net/net_namespace.h>
44 #ifdef CONFIG_DCB
45 #include <net/dcbnl.h>
46 #endif
47 #include <net/netprio_cgroup.h>
48
49 #include <linux/netdev_features.h>
50 #include <linux/neighbour.h>
51 #include <uapi/linux/netdevice.h>
52 #include <uapi/linux/if_bonding.h>
53 #include <uapi/linux/pkt_cls.h>
54 #include <linux/hashtable.h>
55
56 struct netpoll_info;
57 struct device;
58 struct phy_device;
59 struct dsa_switch_tree;
60
61 /* 802.11 specific */
62 struct wireless_dev;
63 /* 802.15.4 specific */
64 struct wpan_dev;
65 struct mpls_dev;
66 /* UDP Tunnel offloads */
67 struct udp_tunnel_info;
68 struct bpf_prog;
69
70 void netdev_set_default_ethtool_ops(struct net_device *dev,
71 const struct ethtool_ops *ops);
72
73 /* Backlog congestion levels */
74 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
75 #define NET_RX_DROP 1 /* packet dropped */
76
77 /*
78 * Transmit return codes: transmit return codes originate from three different
79 * namespaces:
80 *
81 * - qdisc return codes
82 * - driver transmit return codes
83 * - errno values
84 *
85 * Drivers are allowed to return any one of those in their hard_start_xmit()
86 * function. Real network devices commonly used with qdiscs should only return
87 * the driver transmit return codes though - when qdiscs are used, the actual
88 * transmission happens asynchronously, so the value is not propagated to
89 * higher layers. Virtual network devices transmit synchronously; in this case
90 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
91 * others are propagated to higher layers.
92 */
93
94 /* qdisc ->enqueue() return codes. */
95 #define NET_XMIT_SUCCESS 0x00
96 #define NET_XMIT_DROP 0x01 /* skb dropped */
97 #define NET_XMIT_CN 0x02 /* congestion notification */
98 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
99
100 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
101 * indicates that the device will soon be dropping packets, or already drops
102 * some packets of the same priority; prompting us to send less aggressively. */
103 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
104 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
105
106 /* Driver transmit return codes */
107 #define NETDEV_TX_MASK 0xf0
108
109 enum netdev_tx {
110 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
111 NETDEV_TX_OK = 0x00, /* driver took care of packet */
112 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
113 };
114 typedef enum netdev_tx netdev_tx_t;
115
116 /*
117 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
118 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
119 */
120 static inline bool dev_xmit_complete(int rc)
121 {
122 /*
123 * Positive cases with an skb consumed by a driver:
124 * - successful transmission (rc == NETDEV_TX_OK)
125 * - error while transmitting (rc < 0)
126 * - error while queueing to a different device (rc & NET_XMIT_MASK)
127 */
128 if (likely(rc < NET_XMIT_MASK))
129 return true;
130
131 return false;
132 }
133
134 /*
135 * Compute the worst-case header length according to the protocols
136 * used.
137 */
138
139 #if defined(CONFIG_HYPERV_NET)
140 # define LL_MAX_HEADER 128
141 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
142 # if defined(CONFIG_MAC80211_MESH)
143 # define LL_MAX_HEADER 128
144 # else
145 # define LL_MAX_HEADER 96
146 # endif
147 #else
148 # define LL_MAX_HEADER 32
149 #endif
150
151 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
152 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
153 #define MAX_HEADER LL_MAX_HEADER
154 #else
155 #define MAX_HEADER (LL_MAX_HEADER + 48)
156 #endif
157
158 /*
159 * Old network device statistics. Fields are native words
160 * (unsigned long) so they can be read and written atomically.
161 */
162
163 struct net_device_stats {
164 unsigned long rx_packets;
165 unsigned long tx_packets;
166 unsigned long rx_bytes;
167 unsigned long tx_bytes;
168 unsigned long rx_errors;
169 unsigned long tx_errors;
170 unsigned long rx_dropped;
171 unsigned long tx_dropped;
172 unsigned long multicast;
173 unsigned long collisions;
174 unsigned long rx_length_errors;
175 unsigned long rx_over_errors;
176 unsigned long rx_crc_errors;
177 unsigned long rx_frame_errors;
178 unsigned long rx_fifo_errors;
179 unsigned long rx_missed_errors;
180 unsigned long tx_aborted_errors;
181 unsigned long tx_carrier_errors;
182 unsigned long tx_fifo_errors;
183 unsigned long tx_heartbeat_errors;
184 unsigned long tx_window_errors;
185 unsigned long rx_compressed;
186 unsigned long tx_compressed;
187 };
188
189
190 #include <linux/cache.h>
191 #include <linux/skbuff.h>
192
193 #ifdef CONFIG_RPS
194 #include <linux/static_key.h>
195 extern struct static_key rps_needed;
196 extern struct static_key rfs_needed;
197 #endif
198
199 struct neighbour;
200 struct neigh_parms;
201 struct sk_buff;
202
203 struct netdev_hw_addr {
204 struct list_head list;
205 unsigned char addr[MAX_ADDR_LEN];
206 unsigned char type;
207 #define NETDEV_HW_ADDR_T_LAN 1
208 #define NETDEV_HW_ADDR_T_SAN 2
209 #define NETDEV_HW_ADDR_T_SLAVE 3
210 #define NETDEV_HW_ADDR_T_UNICAST 4
211 #define NETDEV_HW_ADDR_T_MULTICAST 5
212 bool global_use;
213 int sync_cnt;
214 int refcount;
215 int synced;
216 struct rcu_head rcu_head;
217 };
218
219 struct netdev_hw_addr_list {
220 struct list_head list;
221 int count;
222 };
223
224 #define netdev_hw_addr_list_count(l) ((l)->count)
225 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
226 #define netdev_hw_addr_list_for_each(ha, l) \
227 list_for_each_entry(ha, &(l)->list, list)
228
229 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
230 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
231 #define netdev_for_each_uc_addr(ha, dev) \
232 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
233
234 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
235 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
236 #define netdev_for_each_mc_addr(ha, dev) \
237 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
238
239 struct hh_cache {
240 unsigned int hh_len;
241 seqlock_t hh_lock;
242
243 /* cached hardware header; allow for machine alignment needs. */
244 #define HH_DATA_MOD 16
245 #define HH_DATA_OFF(__len) \
246 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
247 #define HH_DATA_ALIGN(__len) \
248 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
249 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
250 };
251
252 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
253 * Alternative is:
254 * dev->hard_header_len ? (dev->hard_header_len +
255 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
256 *
257 * We could use other alignment values, but we must maintain the
258 * relationship HH alignment <= LL alignment.
259 */
260 #define LL_RESERVED_SPACE(dev) \
261 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
262 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
263 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
264
265 struct header_ops {
266 int (*create) (struct sk_buff *skb, struct net_device *dev,
267 unsigned short type, const void *daddr,
268 const void *saddr, unsigned int len);
269 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
270 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
271 void (*cache_update)(struct hh_cache *hh,
272 const struct net_device *dev,
273 const unsigned char *haddr);
274 bool (*validate)(const char *ll_header, unsigned int len);
275 };
276
277 /* These flag bits are private to the generic network queueing
278 * layer; they may not be explicitly referenced by any other
279 * code.
280 */
281
282 enum netdev_state_t {
283 __LINK_STATE_START,
284 __LINK_STATE_PRESENT,
285 __LINK_STATE_NOCARRIER,
286 __LINK_STATE_LINKWATCH_PENDING,
287 __LINK_STATE_DORMANT,
288 };
289
290
291 /*
292 * This structure holds boot-time configured netdevice settings. They
293 * are then used in the device probing.
294 */
295 struct netdev_boot_setup {
296 char name[IFNAMSIZ];
297 struct ifmap map;
298 };
299 #define NETDEV_BOOT_SETUP_MAX 8
300
301 int __init netdev_boot_setup(char *str);
302
303 /*
304 * Structure for NAPI scheduling similar to tasklet but with weighting
305 */
306 struct napi_struct {
307 /* The poll_list must only be managed by the entity which
308 * changes the state of the NAPI_STATE_SCHED bit. This means
309 * whoever atomically sets that bit can add this napi_struct
310 * to the per-CPU poll_list, and whoever clears that bit
311 * can remove from the list right before clearing the bit.
312 */
313 struct list_head poll_list;
314
315 unsigned long state;
316 int weight;
317 unsigned int gro_count;
318 int (*poll)(struct napi_struct *, int);
319 #ifdef CONFIG_NETPOLL
320 int poll_owner;
321 #endif
322 struct net_device *dev;
323 struct sk_buff *gro_list;
324 struct sk_buff *skb;
325 struct hrtimer timer;
326 struct list_head dev_list;
327 struct hlist_node napi_hash_node;
328 unsigned int napi_id;
329 };
330
331 enum {
332 NAPI_STATE_SCHED, /* Poll is scheduled */
333 NAPI_STATE_MISSED, /* reschedule a napi */
334 NAPI_STATE_DISABLE, /* Disable pending */
335 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
336 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
337 NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
338 NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
339 };
340
341 enum {
342 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
343 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
344 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
345 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
346 NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
347 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
348 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
349 };
350
351 enum gro_result {
352 GRO_MERGED,
353 GRO_MERGED_FREE,
354 GRO_HELD,
355 GRO_NORMAL,
356 GRO_DROP,
357 GRO_CONSUMED,
358 };
359 typedef enum gro_result gro_result_t;
360
361 /*
362 * enum rx_handler_result - Possible return values for rx_handlers.
363 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
364 * further.
365 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
366 * case skb->dev was changed by rx_handler.
367 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
368 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
369 *
370 * rx_handlers are functions called from inside __netif_receive_skb(), to do
371 * special processing of the skb, prior to delivery to protocol handlers.
372 *
373 * Currently, a net_device can only have a single rx_handler registered. Trying
374 * to register a second rx_handler will return -EBUSY.
375 *
376 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
377 * To unregister a rx_handler on a net_device, use
378 * netdev_rx_handler_unregister().
379 *
380 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
381 * do with the skb.
382 *
383 * If the rx_handler consumed the skb in some way, it should return
384 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
385 * the skb to be delivered in some other way.
386 *
387 * If the rx_handler changed skb->dev, to divert the skb to another
388 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
389 * new device will be called if it exists.
390 *
391 * If the rx_handler decides the skb should be ignored, it should return
392 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
393 * are registered on exact device (ptype->dev == skb->dev).
394 *
395 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
396 * delivered, it should return RX_HANDLER_PASS.
397 *
398 * A device without a registered rx_handler will behave as if rx_handler
399 * returned RX_HANDLER_PASS.
400 */
401
402 enum rx_handler_result {
403 RX_HANDLER_CONSUMED,
404 RX_HANDLER_ANOTHER,
405 RX_HANDLER_EXACT,
406 RX_HANDLER_PASS,
407 };
408 typedef enum rx_handler_result rx_handler_result_t;
409 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
410
411 void __napi_schedule(struct napi_struct *n);
412 void __napi_schedule_irqoff(struct napi_struct *n);
413
414 static inline bool napi_disable_pending(struct napi_struct *n)
415 {
416 return test_bit(NAPI_STATE_DISABLE, &n->state);
417 }
418
419 bool napi_schedule_prep(struct napi_struct *n);
420
421 /**
422 * napi_schedule - schedule NAPI poll
423 * @n: NAPI context
424 *
425 * Schedule NAPI poll routine to be called if it is not already
426 * running.
427 */
428 static inline void napi_schedule(struct napi_struct *n)
429 {
430 if (napi_schedule_prep(n))
431 __napi_schedule(n);
432 }
433
434 /**
435 * napi_schedule_irqoff - schedule NAPI poll
436 * @n: NAPI context
437 *
438 * Variant of napi_schedule(), assuming hard irqs are masked.
439 */
440 static inline void napi_schedule_irqoff(struct napi_struct *n)
441 {
442 if (napi_schedule_prep(n))
443 __napi_schedule_irqoff(n);
444 }
445
446 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
447 static inline bool napi_reschedule(struct napi_struct *napi)
448 {
449 if (napi_schedule_prep(napi)) {
450 __napi_schedule(napi);
451 return true;
452 }
453 return false;
454 }
455
456 bool napi_complete_done(struct napi_struct *n, int work_done);
457 /**
458 * napi_complete - NAPI processing complete
459 * @n: NAPI context
460 *
461 * Mark NAPI processing as complete.
462 * Consider using napi_complete_done() instead.
463 * Return false if device should avoid rearming interrupts.
464 */
465 static inline bool napi_complete(struct napi_struct *n)
466 {
467 return napi_complete_done(n, 0);
468 }
469
470 /**
471 * napi_hash_del - remove a NAPI from global table
472 * @napi: NAPI context
473 *
474 * Warning: caller must observe RCU grace period
475 * before freeing memory containing @napi, if
476 * this function returns true.
477 * Note: core networking stack automatically calls it
478 * from netif_napi_del().
479 * Drivers might want to call this helper to combine all
480 * the needed RCU grace periods into a single one.
481 */
482 bool napi_hash_del(struct napi_struct *napi);
483
484 /**
485 * napi_disable - prevent NAPI from scheduling
486 * @n: NAPI context
487 *
488 * Stop NAPI from being scheduled on this context.
489 * Waits till any outstanding processing completes.
490 */
491 void napi_disable(struct napi_struct *n);
492
493 /**
494 * napi_enable - enable NAPI scheduling
495 * @n: NAPI context
496 *
497 * Resume NAPI from being scheduled on this context.
498 * Must be paired with napi_disable.
499 */
500 static inline void napi_enable(struct napi_struct *n)
501 {
502 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
503 smp_mb__before_atomic();
504 clear_bit(NAPI_STATE_SCHED, &n->state);
505 clear_bit(NAPI_STATE_NPSVC, &n->state);
506 }
507
508 /**
509 * napi_synchronize - wait until NAPI is not running
510 * @n: NAPI context
511 *
512 * Wait until NAPI is done being scheduled on this context.
513 * Waits till any outstanding processing completes but
514 * does not disable future activations.
515 */
516 static inline void napi_synchronize(const struct napi_struct *n)
517 {
518 if (IS_ENABLED(CONFIG_SMP))
519 while (test_bit(NAPI_STATE_SCHED, &n->state))
520 msleep(1);
521 else
522 barrier();
523 }
524
525 enum netdev_queue_state_t {
526 __QUEUE_STATE_DRV_XOFF,
527 __QUEUE_STATE_STACK_XOFF,
528 __QUEUE_STATE_FROZEN,
529 };
530
531 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
532 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
533 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
534
535 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
536 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
537 QUEUE_STATE_FROZEN)
538 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
539 QUEUE_STATE_FROZEN)
540
541 /*
542 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
543 * netif_tx_* functions below are used to manipulate this flag. The
544 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
545 * queue independently. The netif_xmit_*stopped functions below are called
546 * to check if the queue has been stopped by the driver or stack (either
547 * of the XOFF bits are set in the state). Drivers should not need to call
548 * netif_xmit*stopped functions, they should only be using netif_tx_*.
549 */
550
551 struct netdev_queue {
552 /*
553 * read-mostly part
554 */
555 struct net_device *dev;
556 struct Qdisc __rcu *qdisc;
557 struct Qdisc *qdisc_sleeping;
558 #ifdef CONFIG_SYSFS
559 struct kobject kobj;
560 #endif
561 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
562 int numa_node;
563 #endif
564 unsigned long tx_maxrate;
565 /*
566 * Number of TX timeouts for this queue
567 * (/sys/class/net/DEV/Q/trans_timeout)
568 */
569 unsigned long trans_timeout;
570 /*
571 * write-mostly part
572 */
573 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
574 int xmit_lock_owner;
575 /*
576 * Time (in jiffies) of last Tx
577 */
578 unsigned long trans_start;
579
580 unsigned long state;
581
582 #ifdef CONFIG_BQL
583 struct dql dql;
584 #endif
585 } ____cacheline_aligned_in_smp;
586
587 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
588 {
589 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
590 return q->numa_node;
591 #else
592 return NUMA_NO_NODE;
593 #endif
594 }
595
596 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
597 {
598 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
599 q->numa_node = node;
600 #endif
601 }
602
603 #ifdef CONFIG_RPS
604 /*
605 * This structure holds an RPS map which can be of variable length. The
606 * map is an array of CPUs.
607 */
608 struct rps_map {
609 unsigned int len;
610 struct rcu_head rcu;
611 u16 cpus[0];
612 };
613 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
614
615 /*
616 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
617 * tail pointer for that CPU's input queue at the time of last enqueue, and
618 * a hardware filter index.
619 */
620 struct rps_dev_flow {
621 u16 cpu;
622 u16 filter;
623 unsigned int last_qtail;
624 };
625 #define RPS_NO_FILTER 0xffff
626
627 /*
628 * The rps_dev_flow_table structure contains a table of flow mappings.
629 */
630 struct rps_dev_flow_table {
631 unsigned int mask;
632 struct rcu_head rcu;
633 struct rps_dev_flow flows[0];
634 };
635 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
636 ((_num) * sizeof(struct rps_dev_flow)))
637
638 /*
639 * The rps_sock_flow_table contains mappings of flows to the last CPU
640 * on which they were processed by the application (set in recvmsg).
641 * Each entry is a 32bit value. Upper part is the high-order bits
642 * of flow hash, lower part is CPU number.
643 * rps_cpu_mask is used to partition the space, depending on number of
644 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
645 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
646 * meaning we use 32-6=26 bits for the hash.
647 */
648 struct rps_sock_flow_table {
649 u32 mask;
650
651 u32 ents[0] ____cacheline_aligned_in_smp;
652 };
653 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
654
655 #define RPS_NO_CPU 0xffff
656
657 extern u32 rps_cpu_mask;
658 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
659
660 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
661 u32 hash)
662 {
663 if (table && hash) {
664 unsigned int index = hash & table->mask;
665 u32 val = hash & ~rps_cpu_mask;
666
667 /* We only give a hint, preemption can change CPU under us */
668 val |= raw_smp_processor_id();
669
670 if (table->ents[index] != val)
671 table->ents[index] = val;
672 }
673 }
674
675 #ifdef CONFIG_RFS_ACCEL
676 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
677 u16 filter_id);
678 #endif
679 #endif /* CONFIG_RPS */
680
681 /* This structure contains an instance of an RX queue. */
682 struct netdev_rx_queue {
683 #ifdef CONFIG_RPS
684 struct rps_map __rcu *rps_map;
685 struct rps_dev_flow_table __rcu *rps_flow_table;
686 #endif
687 struct kobject kobj;
688 struct net_device *dev;
689 } ____cacheline_aligned_in_smp;
690
691 /*
692 * RX queue sysfs structures and functions.
693 */
694 struct rx_queue_attribute {
695 struct attribute attr;
696 ssize_t (*show)(struct netdev_rx_queue *queue,
697 struct rx_queue_attribute *attr, char *buf);
698 ssize_t (*store)(struct netdev_rx_queue *queue,
699 struct rx_queue_attribute *attr, const char *buf, size_t len);
700 };
701
702 #ifdef CONFIG_XPS
703 /*
704 * This structure holds an XPS map which can be of variable length. The
705 * map is an array of queues.
706 */
707 struct xps_map {
708 unsigned int len;
709 unsigned int alloc_len;
710 struct rcu_head rcu;
711 u16 queues[0];
712 };
713 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
714 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
715 - sizeof(struct xps_map)) / sizeof(u16))
716
717 /*
718 * This structure holds all XPS maps for device. Maps are indexed by CPU.
719 */
720 struct xps_dev_maps {
721 struct rcu_head rcu;
722 struct xps_map __rcu *cpu_map[0];
723 };
724 #define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
725 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
726 #endif /* CONFIG_XPS */
727
728 #define TC_MAX_QUEUE 16
729 #define TC_BITMASK 15
730 /* HW offloaded queuing disciplines txq count and offset maps */
731 struct netdev_tc_txq {
732 u16 count;
733 u16 offset;
734 };
735
736 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
737 /*
738 * This structure is to hold information about the device
739 * configured to run FCoE protocol stack.
740 */
741 struct netdev_fcoe_hbainfo {
742 char manufacturer[64];
743 char serial_number[64];
744 char hardware_version[64];
745 char driver_version[64];
746 char optionrom_version[64];
747 char firmware_version[64];
748 char model[256];
749 char model_description[256];
750 };
751 #endif
752
753 #define MAX_PHYS_ITEM_ID_LEN 32
754
755 /* This structure holds a unique identifier to identify some
756 * physical item (port for example) used by a netdevice.
757 */
758 struct netdev_phys_item_id {
759 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
760 unsigned char id_len;
761 };
762
763 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
764 struct netdev_phys_item_id *b)
765 {
766 return a->id_len == b->id_len &&
767 memcmp(a->id, b->id, a->id_len) == 0;
768 }
769
770 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
771 struct sk_buff *skb);
772
773 /* These structures hold the attributes of qdisc and classifiers
774 * that are being passed to the netdevice through the setup_tc op.
775 */
776 enum {
777 TC_SETUP_MQPRIO,
778 TC_SETUP_CLSU32,
779 TC_SETUP_CLSFLOWER,
780 TC_SETUP_MATCHALL,
781 TC_SETUP_CLSBPF,
782 };
783
784 struct tc_cls_u32_offload;
785
786 struct tc_to_netdev {
787 unsigned int type;
788 union {
789 struct tc_cls_u32_offload *cls_u32;
790 struct tc_cls_flower_offload *cls_flower;
791 struct tc_cls_matchall_offload *cls_mall;
792 struct tc_cls_bpf_offload *cls_bpf;
793 struct tc_mqprio_qopt *mqprio;
794 };
795 bool egress_dev;
796 };
797
798 /* These structures hold the attributes of xdp state that are being passed
799 * to the netdevice through the xdp op.
800 */
801 enum xdp_netdev_command {
802 /* Set or clear a bpf program used in the earliest stages of packet
803 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
804 * is responsible for calling bpf_prog_put on any old progs that are
805 * stored. In case of error, the callee need not release the new prog
806 * reference, but on success it takes ownership and must bpf_prog_put
807 * when it is no longer used.
808 */
809 XDP_SETUP_PROG,
810 /* Check if a bpf program is set on the device. The callee should
811 * return true if a program is currently attached and running.
812 */
813 XDP_QUERY_PROG,
814 };
815
816 struct netlink_ext_ack;
817
818 struct netdev_xdp {
819 enum xdp_netdev_command command;
820 union {
821 /* XDP_SETUP_PROG */
822 struct {
823 struct bpf_prog *prog;
824 struct netlink_ext_ack *extack;
825 };
826 /* XDP_QUERY_PROG */
827 bool prog_attached;
828 };
829 };
830
831 #ifdef CONFIG_XFRM_OFFLOAD
832 struct xfrmdev_ops {
833 int (*xdo_dev_state_add) (struct xfrm_state *x);
834 void (*xdo_dev_state_delete) (struct xfrm_state *x);
835 void (*xdo_dev_state_free) (struct xfrm_state *x);
836 bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
837 struct xfrm_state *x);
838 };
839 #endif
840
841 /*
842 * This structure defines the management hooks for network devices.
843 * The following hooks can be defined; unless noted otherwise, they are
844 * optional and can be filled with a null pointer.
845 *
846 * int (*ndo_init)(struct net_device *dev);
847 * This function is called once when a network device is registered.
848 * The network device can use this for any late stage initialization
849 * or semantic validation. It can fail with an error code which will
850 * be propagated back to register_netdev.
851 *
852 * void (*ndo_uninit)(struct net_device *dev);
853 * This function is called when device is unregistered or when registration
854 * fails. It is not called if init fails.
855 *
856 * int (*ndo_open)(struct net_device *dev);
857 * This function is called when a network device transitions to the up
858 * state.
859 *
860 * int (*ndo_stop)(struct net_device *dev);
861 * This function is called when a network device transitions to the down
862 * state.
863 *
864 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
865 * struct net_device *dev);
866 * Called when a packet needs to be transmitted.
867 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
868 * the queue before that can happen; it's for obsolete devices and weird
869 * corner cases, but the stack really does a non-trivial amount
870 * of useless work if you return NETDEV_TX_BUSY.
871 * Required; cannot be NULL.
872 *
873 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
874 * struct net_device *dev
875 * netdev_features_t features);
876 * Called by core transmit path to determine if device is capable of
877 * performing offload operations on a given packet. This is to give
878 * the device an opportunity to implement any restrictions that cannot
879 * be otherwise expressed by feature flags. The check is called with
880 * the set of features that the stack has calculated and it returns
881 * those the driver believes to be appropriate.
882 *
883 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
884 * void *accel_priv, select_queue_fallback_t fallback);
885 * Called to decide which queue to use when device supports multiple
886 * transmit queues.
887 *
888 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
889 * This function is called to allow device receiver to make
890 * changes to configuration when multicast or promiscuous is enabled.
891 *
892 * void (*ndo_set_rx_mode)(struct net_device *dev);
893 * This function is called device changes address list filtering.
894 * If driver handles unicast address filtering, it should set
895 * IFF_UNICAST_FLT in its priv_flags.
896 *
897 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
898 * This function is called when the Media Access Control address
899 * needs to be changed. If this interface is not defined, the
900 * MAC address can not be changed.
901 *
902 * int (*ndo_validate_addr)(struct net_device *dev);
903 * Test if Media Access Control address is valid for the device.
904 *
905 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
906 * Called when a user requests an ioctl which can't be handled by
907 * the generic interface code. If not defined ioctls return
908 * not supported error code.
909 *
910 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
911 * Used to set network devices bus interface parameters. This interface
912 * is retained for legacy reasons; new devices should use the bus
913 * interface (PCI) for low level management.
914 *
915 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
916 * Called when a user wants to change the Maximum Transfer Unit
917 * of a device. If not defined, any request to change MTU will
918 * will return an error.
919 *
920 * void (*ndo_tx_timeout)(struct net_device *dev);
921 * Callback used when the transmitter has not made any progress
922 * for dev->watchdog ticks.
923 *
924 * void (*ndo_get_stats64)(struct net_device *dev,
925 * struct rtnl_link_stats64 *storage);
926 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
927 * Called when a user wants to get the network device usage
928 * statistics. Drivers must do one of the following:
929 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
930 * rtnl_link_stats64 structure passed by the caller.
931 * 2. Define @ndo_get_stats to update a net_device_stats structure
932 * (which should normally be dev->stats) and return a pointer to
933 * it. The structure may be changed asynchronously only if each
934 * field is written atomically.
935 * 3. Update dev->stats asynchronously and atomically, and define
936 * neither operation.
937 *
938 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
939 * Return true if this device supports offload stats of this attr_id.
940 *
941 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
942 * void *attr_data)
943 * Get statistics for offload operations by attr_id. Write it into the
944 * attr_data pointer.
945 *
946 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
947 * If device supports VLAN filtering this function is called when a
948 * VLAN id is registered.
949 *
950 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
951 * If device supports VLAN filtering this function is called when a
952 * VLAN id is unregistered.
953 *
954 * void (*ndo_poll_controller)(struct net_device *dev);
955 *
956 * SR-IOV management functions.
957 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
958 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
959 * u8 qos, __be16 proto);
960 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
961 * int max_tx_rate);
962 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
963 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
964 * int (*ndo_get_vf_config)(struct net_device *dev,
965 * int vf, struct ifla_vf_info *ivf);
966 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
967 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
968 * struct nlattr *port[]);
969 *
970 * Enable or disable the VF ability to query its RSS Redirection Table and
971 * Hash Key. This is needed since on some devices VF share this information
972 * with PF and querying it may introduce a theoretical security risk.
973 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
974 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
975 * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
976 * __be16 protocol, struct tc_to_netdev *tc);
977 * Called to setup any 'tc' scheduler, classifier or action on @dev.
978 * This is always called from the stack with the rtnl lock held and netif
979 * tx queues stopped. This allows the netdevice to perform queue
980 * management safely.
981 *
982 * Fiber Channel over Ethernet (FCoE) offload functions.
983 * int (*ndo_fcoe_enable)(struct net_device *dev);
984 * Called when the FCoE protocol stack wants to start using LLD for FCoE
985 * so the underlying device can perform whatever needed configuration or
986 * initialization to support acceleration of FCoE traffic.
987 *
988 * int (*ndo_fcoe_disable)(struct net_device *dev);
989 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
990 * so the underlying device can perform whatever needed clean-ups to
991 * stop supporting acceleration of FCoE traffic.
992 *
993 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
994 * struct scatterlist *sgl, unsigned int sgc);
995 * Called when the FCoE Initiator wants to initialize an I/O that
996 * is a possible candidate for Direct Data Placement (DDP). The LLD can
997 * perform necessary setup and returns 1 to indicate the device is set up
998 * successfully to perform DDP on this I/O, otherwise this returns 0.
999 *
1000 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
1001 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
1002 * indicated by the FC exchange id 'xid', so the underlying device can
1003 * clean up and reuse resources for later DDP requests.
1004 *
1005 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
1006 * struct scatterlist *sgl, unsigned int sgc);
1007 * Called when the FCoE Target wants to initialize an I/O that
1008 * is a possible candidate for Direct Data Placement (DDP). The LLD can
1009 * perform necessary setup and returns 1 to indicate the device is set up
1010 * successfully to perform DDP on this I/O, otherwise this returns 0.
1011 *
1012 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1013 * struct netdev_fcoe_hbainfo *hbainfo);
1014 * Called when the FCoE Protocol stack wants information on the underlying
1015 * device. This information is utilized by the FCoE protocol stack to
1016 * register attributes with Fiber Channel management service as per the
1017 * FC-GS Fabric Device Management Information(FDMI) specification.
1018 *
1019 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
1020 * Called when the underlying device wants to override default World Wide
1021 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1022 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1023 * protocol stack to use.
1024 *
1025 * RFS acceleration.
1026 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1027 * u16 rxq_index, u32 flow_id);
1028 * Set hardware filter for RFS. rxq_index is the target queue index;
1029 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1030 * Return the filter ID on success, or a negative error code.
1031 *
1032 * Slave management functions (for bridge, bonding, etc).
1033 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1034 * Called to make another netdev an underling.
1035 *
1036 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1037 * Called to release previously enslaved netdev.
1038 *
1039 * Feature/offload setting functions.
1040 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1041 * netdev_features_t features);
1042 * Adjusts the requested feature flags according to device-specific
1043 * constraints, and returns the resulting flags. Must not modify
1044 * the device state.
1045 *
1046 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1047 * Called to update device configuration to new features. Passed
1048 * feature set might be less than what was returned by ndo_fix_features()).
1049 * Must return >0 or -errno if it changed dev->features itself.
1050 *
1051 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1052 * struct net_device *dev,
1053 * const unsigned char *addr, u16 vid, u16 flags)
1054 * Adds an FDB entry to dev for addr.
1055 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1056 * struct net_device *dev,
1057 * const unsigned char *addr, u16 vid)
1058 * Deletes the FDB entry from dev coresponding to addr.
1059 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1060 * struct net_device *dev, struct net_device *filter_dev,
1061 * int *idx)
1062 * Used to add FDB entries to dump requests. Implementers should add
1063 * entries to skb and update idx with the number of entries.
1064 *
1065 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1066 * u16 flags)
1067 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1068 * struct net_device *dev, u32 filter_mask,
1069 * int nlflags)
1070 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1071 * u16 flags);
1072 *
1073 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1074 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1075 * which do not represent real hardware may define this to allow their
1076 * userspace components to manage their virtual carrier state. Devices
1077 * that determine carrier state from physical hardware properties (eg
1078 * network cables) or protocol-dependent mechanisms (eg
1079 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
1080 *
1081 * int (*ndo_get_phys_port_id)(struct net_device *dev,
1082 * struct netdev_phys_item_id *ppid);
1083 * Called to get ID of physical port of this device. If driver does
1084 * not implement this, it is assumed that the hw is not able to have
1085 * multiple net devices on single physical port.
1086 *
1087 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
1088 * struct udp_tunnel_info *ti);
1089 * Called by UDP tunnel to notify a driver about the UDP port and socket
1090 * address family that a UDP tunnel is listnening to. It is called only
1091 * when a new port starts listening. The operation is protected by the
1092 * RTNL.
1093 *
1094 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
1095 * struct udp_tunnel_info *ti);
1096 * Called by UDP tunnel to notify the driver about a UDP port and socket
1097 * address family that the UDP tunnel is not listening to anymore. The
1098 * operation is protected by the RTNL.
1099 *
1100 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1101 * struct net_device *dev)
1102 * Called by upper layer devices to accelerate switching or other
1103 * station functionality into hardware. 'pdev is the lowerdev
1104 * to use for the offload and 'dev' is the net device that will
1105 * back the offload. Returns a pointer to the private structure
1106 * the upper layer will maintain.
1107 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1108 * Called by upper layer device to delete the station created
1109 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1110 * the station and priv is the structure returned by the add
1111 * operation.
1112 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
1113 * struct net_device *dev,
1114 * void *priv);
1115 * Callback to use for xmit over the accelerated station. This
1116 * is used in place of ndo_start_xmit on accelerated net
1117 * devices.
1118 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1119 * int queue_index, u32 maxrate);
1120 * Called when a user wants to set a max-rate limitation of specific
1121 * TX queue.
1122 * int (*ndo_get_iflink)(const struct net_device *dev);
1123 * Called to get the iflink value of this device.
1124 * void (*ndo_change_proto_down)(struct net_device *dev,
1125 * bool proto_down);
1126 * This function is used to pass protocol port error state information
1127 * to the switch driver. The switch driver can react to the proto_down
1128 * by doing a phys down on the associated switch port.
1129 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1130 * This function is used to get egress tunnel information for given skb.
1131 * This is useful for retrieving outer tunnel header parameters while
1132 * sampling packet.
1133 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1134 * This function is used to specify the headroom that the skb must
1135 * consider when allocation skb during packet reception. Setting
1136 * appropriate rx headroom value allows avoiding skb head copy on
1137 * forward. Setting a negative value resets the rx headroom to the
1138 * default value.
1139 * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
1140 * This function is used to set or query state related to XDP on the
1141 * netdevice. See definition of enum xdp_netdev_command for details.
1142 *
1143 */
1144 struct net_device_ops {
1145 int (*ndo_init)(struct net_device *dev);
1146 void (*ndo_uninit)(struct net_device *dev);
1147 int (*ndo_open)(struct net_device *dev);
1148 int (*ndo_stop)(struct net_device *dev);
1149 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1150 struct net_device *dev);
1151 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1152 struct net_device *dev,
1153 netdev_features_t features);
1154 u16 (*ndo_select_queue)(struct net_device *dev,
1155 struct sk_buff *skb,
1156 void *accel_priv,
1157 select_queue_fallback_t fallback);
1158 void (*ndo_change_rx_flags)(struct net_device *dev,
1159 int flags);
1160 void (*ndo_set_rx_mode)(struct net_device *dev);
1161 int (*ndo_set_mac_address)(struct net_device *dev,
1162 void *addr);
1163 int (*ndo_validate_addr)(struct net_device *dev);
1164 int (*ndo_do_ioctl)(struct net_device *dev,
1165 struct ifreq *ifr, int cmd);
1166 int (*ndo_set_config)(struct net_device *dev,
1167 struct ifmap *map);
1168 int (*ndo_change_mtu)(struct net_device *dev,
1169 int new_mtu);
1170 int (*ndo_neigh_setup)(struct net_device *dev,
1171 struct neigh_parms *);
1172 void (*ndo_tx_timeout) (struct net_device *dev);
1173
1174 void (*ndo_get_stats64)(struct net_device *dev,
1175 struct rtnl_link_stats64 *storage);
1176 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1177 int (*ndo_get_offload_stats)(int attr_id,
1178 const struct net_device *dev,
1179 void *attr_data);
1180 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1181
1182 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1183 __be16 proto, u16 vid);
1184 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1185 __be16 proto, u16 vid);
1186 #ifdef CONFIG_NET_POLL_CONTROLLER
1187 void (*ndo_poll_controller)(struct net_device *dev);
1188 int (*ndo_netpoll_setup)(struct net_device *dev,
1189 struct netpoll_info *info);
1190 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1191 #endif
1192 int (*ndo_set_vf_mac)(struct net_device *dev,
1193 int queue, u8 *mac);
1194 int (*ndo_set_vf_vlan)(struct net_device *dev,
1195 int queue, u16 vlan,
1196 u8 qos, __be16 proto);
1197 int (*ndo_set_vf_rate)(struct net_device *dev,
1198 int vf, int min_tx_rate,
1199 int max_tx_rate);
1200 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1201 int vf, bool setting);
1202 int (*ndo_set_vf_trust)(struct net_device *dev,
1203 int vf, bool setting);
1204 int (*ndo_get_vf_config)(struct net_device *dev,
1205 int vf,
1206 struct ifla_vf_info *ivf);
1207 int (*ndo_set_vf_link_state)(struct net_device *dev,
1208 int vf, int link_state);
1209 int (*ndo_get_vf_stats)(struct net_device *dev,
1210 int vf,
1211 struct ifla_vf_stats
1212 *vf_stats);
1213 int (*ndo_set_vf_port)(struct net_device *dev,
1214 int vf,
1215 struct nlattr *port[]);
1216 int (*ndo_get_vf_port)(struct net_device *dev,
1217 int vf, struct sk_buff *skb);
1218 int (*ndo_set_vf_guid)(struct net_device *dev,
1219 int vf, u64 guid,
1220 int guid_type);
1221 int (*ndo_set_vf_rss_query_en)(
1222 struct net_device *dev,
1223 int vf, bool setting);
1224 int (*ndo_setup_tc)(struct net_device *dev,
1225 u32 handle,
1226 __be16 protocol,
1227 struct tc_to_netdev *tc);
1228 #if IS_ENABLED(CONFIG_FCOE)
1229 int (*ndo_fcoe_enable)(struct net_device *dev);
1230 int (*ndo_fcoe_disable)(struct net_device *dev);
1231 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1232 u16 xid,
1233 struct scatterlist *sgl,
1234 unsigned int sgc);
1235 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1236 u16 xid);
1237 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1238 u16 xid,
1239 struct scatterlist *sgl,
1240 unsigned int sgc);
1241 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1242 struct netdev_fcoe_hbainfo *hbainfo);
1243 #endif
1244
1245 #if IS_ENABLED(CONFIG_LIBFCOE)
1246 #define NETDEV_FCOE_WWNN 0
1247 #define NETDEV_FCOE_WWPN 1
1248 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1249 u64 *wwn, int type);
1250 #endif
1251
1252 #ifdef CONFIG_RFS_ACCEL
1253 int (*ndo_rx_flow_steer)(struct net_device *dev,
1254 const struct sk_buff *skb,
1255 u16 rxq_index,
1256 u32 flow_id);
1257 #endif
1258 int (*ndo_add_slave)(struct net_device *dev,
1259 struct net_device *slave_dev);
1260 int (*ndo_del_slave)(struct net_device *dev,
1261 struct net_device *slave_dev);
1262 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1263 netdev_features_t features);
1264 int (*ndo_set_features)(struct net_device *dev,
1265 netdev_features_t features);
1266 int (*ndo_neigh_construct)(struct net_device *dev,
1267 struct neighbour *n);
1268 void (*ndo_neigh_destroy)(struct net_device *dev,
1269 struct neighbour *n);
1270
1271 int (*ndo_fdb_add)(struct ndmsg *ndm,
1272 struct nlattr *tb[],
1273 struct net_device *dev,
1274 const unsigned char *addr,
1275 u16 vid,
1276 u16 flags);
1277 int (*ndo_fdb_del)(struct ndmsg *ndm,
1278 struct nlattr *tb[],
1279 struct net_device *dev,
1280 const unsigned char *addr,
1281 u16 vid);
1282 int (*ndo_fdb_dump)(struct sk_buff *skb,
1283 struct netlink_callback *cb,
1284 struct net_device *dev,
1285 struct net_device *filter_dev,
1286 int *idx);
1287
1288 int (*ndo_bridge_setlink)(struct net_device *dev,
1289 struct nlmsghdr *nlh,
1290 u16 flags);
1291 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1292 u32 pid, u32 seq,
1293 struct net_device *dev,
1294 u32 filter_mask,
1295 int nlflags);
1296 int (*ndo_bridge_dellink)(struct net_device *dev,
1297 struct nlmsghdr *nlh,
1298 u16 flags);
1299 int (*ndo_change_carrier)(struct net_device *dev,
1300 bool new_carrier);
1301 int (*ndo_get_phys_port_id)(struct net_device *dev,
1302 struct netdev_phys_item_id *ppid);
1303 int (*ndo_get_phys_port_name)(struct net_device *dev,
1304 char *name, size_t len);
1305 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1306 struct udp_tunnel_info *ti);
1307 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1308 struct udp_tunnel_info *ti);
1309 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1310 struct net_device *dev);
1311 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1312 void *priv);
1313
1314 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1315 struct net_device *dev,
1316 void *priv);
1317 int (*ndo_get_lock_subclass)(struct net_device *dev);
1318 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1319 int queue_index,
1320 u32 maxrate);
1321 int (*ndo_get_iflink)(const struct net_device *dev);
1322 int (*ndo_change_proto_down)(struct net_device *dev,
1323 bool proto_down);
1324 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1325 struct sk_buff *skb);
1326 void (*ndo_set_rx_headroom)(struct net_device *dev,
1327 int needed_headroom);
1328 int (*ndo_xdp)(struct net_device *dev,
1329 struct netdev_xdp *xdp);
1330 };
1331
1332 /**
1333 * enum net_device_priv_flags - &struct net_device priv_flags
1334 *
1335 * These are the &struct net_device, they are only set internally
1336 * by drivers and used in the kernel. These flags are invisible to
1337 * userspace; this means that the order of these flags can change
1338 * during any kernel release.
1339 *
1340 * You should have a pretty good reason to be extending these flags.
1341 *
1342 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1343 * @IFF_EBRIDGE: Ethernet bridging device
1344 * @IFF_BONDING: bonding master or slave
1345 * @IFF_ISATAP: ISATAP interface (RFC4214)
1346 * @IFF_WAN_HDLC: WAN HDLC device
1347 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1348 * release skb->dst
1349 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1350 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1351 * @IFF_MACVLAN_PORT: device used as macvlan port
1352 * @IFF_BRIDGE_PORT: device used as bridge port
1353 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1354 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1355 * @IFF_UNICAST_FLT: Supports unicast filtering
1356 * @IFF_TEAM_PORT: device used as team port
1357 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1358 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1359 * change when it's running
1360 * @IFF_MACVLAN: Macvlan device
1361 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1362 * underlying stacked devices
1363 * @IFF_IPVLAN_MASTER: IPvlan master device
1364 * @IFF_IPVLAN_SLAVE: IPvlan slave device
1365 * @IFF_L3MDEV_MASTER: device is an L3 master device
1366 * @IFF_NO_QUEUE: device can run without qdisc attached
1367 * @IFF_OPENVSWITCH: device is a Open vSwitch master
1368 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1369 * @IFF_TEAM: device is a team device
1370 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1371 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1372 * entity (i.e. the master device for bridged veth)
1373 * @IFF_MACSEC: device is a MACsec device
1374 */
1375 enum netdev_priv_flags {
1376 IFF_802_1Q_VLAN = 1<<0,
1377 IFF_EBRIDGE = 1<<1,
1378 IFF_BONDING = 1<<2,
1379 IFF_ISATAP = 1<<3,
1380 IFF_WAN_HDLC = 1<<4,
1381 IFF_XMIT_DST_RELEASE = 1<<5,
1382 IFF_DONT_BRIDGE = 1<<6,
1383 IFF_DISABLE_NETPOLL = 1<<7,
1384 IFF_MACVLAN_PORT = 1<<8,
1385 IFF_BRIDGE_PORT = 1<<9,
1386 IFF_OVS_DATAPATH = 1<<10,
1387 IFF_TX_SKB_SHARING = 1<<11,
1388 IFF_UNICAST_FLT = 1<<12,
1389 IFF_TEAM_PORT = 1<<13,
1390 IFF_SUPP_NOFCS = 1<<14,
1391 IFF_LIVE_ADDR_CHANGE = 1<<15,
1392 IFF_MACVLAN = 1<<16,
1393 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1394 IFF_IPVLAN_MASTER = 1<<18,
1395 IFF_IPVLAN_SLAVE = 1<<19,
1396 IFF_L3MDEV_MASTER = 1<<20,
1397 IFF_NO_QUEUE = 1<<21,
1398 IFF_OPENVSWITCH = 1<<22,
1399 IFF_L3MDEV_SLAVE = 1<<23,
1400 IFF_TEAM = 1<<24,
1401 IFF_RXFH_CONFIGURED = 1<<25,
1402 IFF_PHONY_HEADROOM = 1<<26,
1403 IFF_MACSEC = 1<<27,
1404 };
1405
1406 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1407 #define IFF_EBRIDGE IFF_EBRIDGE
1408 #define IFF_BONDING IFF_BONDING
1409 #define IFF_ISATAP IFF_ISATAP
1410 #define IFF_WAN_HDLC IFF_WAN_HDLC
1411 #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1412 #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1413 #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1414 #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1415 #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1416 #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1417 #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1418 #define IFF_UNICAST_FLT IFF_UNICAST_FLT
1419 #define IFF_TEAM_PORT IFF_TEAM_PORT
1420 #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1421 #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1422 #define IFF_MACVLAN IFF_MACVLAN
1423 #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1424 #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1425 #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1426 #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1427 #define IFF_NO_QUEUE IFF_NO_QUEUE
1428 #define IFF_OPENVSWITCH IFF_OPENVSWITCH
1429 #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1430 #define IFF_TEAM IFF_TEAM
1431 #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1432 #define IFF_MACSEC IFF_MACSEC
1433
1434 /**
1435 * struct net_device - The DEVICE structure.
1436 * Actually, this whole structure is a big mistake. It mixes I/O
1437 * data with strictly "high-level" data, and it has to know about
1438 * almost every data structure used in the INET module.
1439 *
1440 * @name: This is the first field of the "visible" part of this structure
1441 * (i.e. as seen by users in the "Space.c" file). It is the name
1442 * of the interface.
1443 *
1444 * @name_hlist: Device name hash chain, please keep it close to name[]
1445 * @ifalias: SNMP alias
1446 * @mem_end: Shared memory end
1447 * @mem_start: Shared memory start
1448 * @base_addr: Device I/O address
1449 * @irq: Device IRQ number
1450 *
1451 * @carrier_changes: Stats to monitor carrier on<->off transitions
1452 *
1453 * @state: Generic network queuing layer state, see netdev_state_t
1454 * @dev_list: The global list of network devices
1455 * @napi_list: List entry used for polling NAPI devices
1456 * @unreg_list: List entry when we are unregistering the
1457 * device; see the function unregister_netdev
1458 * @close_list: List entry used when we are closing the device
1459 * @ptype_all: Device-specific packet handlers for all protocols
1460 * @ptype_specific: Device-specific, protocol-specific packet handlers
1461 *
1462 * @adj_list: Directly linked devices, like slaves for bonding
1463 * @features: Currently active device features
1464 * @hw_features: User-changeable features
1465 *
1466 * @wanted_features: User-requested features
1467 * @vlan_features: Mask of features inheritable by VLAN devices
1468 *
1469 * @hw_enc_features: Mask of features inherited by encapsulating devices
1470 * This field indicates what encapsulation
1471 * offloads the hardware is capable of doing,
1472 * and drivers will need to set them appropriately.
1473 *
1474 * @mpls_features: Mask of features inheritable by MPLS
1475 *
1476 * @ifindex: interface index
1477 * @group: The group the device belongs to
1478 *
1479 * @stats: Statistics struct, which was left as a legacy, use
1480 * rtnl_link_stats64 instead
1481 *
1482 * @rx_dropped: Dropped packets by core network,
1483 * do not use this in drivers
1484 * @tx_dropped: Dropped packets by core network,
1485 * do not use this in drivers
1486 * @rx_nohandler: nohandler dropped packets by core network on
1487 * inactive devices, do not use this in drivers
1488 *
1489 * @wireless_handlers: List of functions to handle Wireless Extensions,
1490 * instead of ioctl,
1491 * see <net/iw_handler.h> for details.
1492 * @wireless_data: Instance data managed by the core of wireless extensions
1493 *
1494 * @netdev_ops: Includes several pointers to callbacks,
1495 * if one wants to override the ndo_*() functions
1496 * @ethtool_ops: Management operations
1497 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1498 * discovery handling. Necessary for e.g. 6LoWPAN.
1499 * @header_ops: Includes callbacks for creating,parsing,caching,etc
1500 * of Layer 2 headers.
1501 *
1502 * @flags: Interface flags (a la BSD)
1503 * @priv_flags: Like 'flags' but invisible to userspace,
1504 * see if.h for the definitions
1505 * @gflags: Global flags ( kept as legacy )
1506 * @padded: How much padding added by alloc_netdev()
1507 * @operstate: RFC2863 operstate
1508 * @link_mode: Mapping policy to operstate
1509 * @if_port: Selectable AUI, TP, ...
1510 * @dma: DMA channel
1511 * @mtu: Interface MTU value
1512 * @min_mtu: Interface Minimum MTU value
1513 * @max_mtu: Interface Maximum MTU value
1514 * @type: Interface hardware type
1515 * @hard_header_len: Maximum hardware header length.
1516 * @min_header_len: Minimum hardware header length
1517 *
1518 * @needed_headroom: Extra headroom the hardware may need, but not in all
1519 * cases can this be guaranteed
1520 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1521 * cases can this be guaranteed. Some cases also use
1522 * LL_MAX_HEADER instead to allocate the skb
1523 *
1524 * interface address info:
1525 *
1526 * @perm_addr: Permanent hw address
1527 * @addr_assign_type: Hw address assignment type
1528 * @addr_len: Hardware address length
1529 * @neigh_priv_len: Used in neigh_alloc()
1530 * @dev_id: Used to differentiate devices that share
1531 * the same link layer address
1532 * @dev_port: Used to differentiate devices that share
1533 * the same function
1534 * @addr_list_lock: XXX: need comments on this one
1535 * @uc_promisc: Counter that indicates promiscuous mode
1536 * has been enabled due to the need to listen to
1537 * additional unicast addresses in a device that
1538 * does not implement ndo_set_rx_mode()
1539 * @uc: unicast mac addresses
1540 * @mc: multicast mac addresses
1541 * @dev_addrs: list of device hw addresses
1542 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1543 * @promiscuity: Number of times the NIC is told to work in
1544 * promiscuous mode; if it becomes 0 the NIC will
1545 * exit promiscuous mode
1546 * @allmulti: Counter, enables or disables allmulticast mode
1547 *
1548 * @vlan_info: VLAN info
1549 * @dsa_ptr: dsa specific data
1550 * @tipc_ptr: TIPC specific data
1551 * @atalk_ptr: AppleTalk link
1552 * @ip_ptr: IPv4 specific data
1553 * @dn_ptr: DECnet specific data
1554 * @ip6_ptr: IPv6 specific data
1555 * @ax25_ptr: AX.25 specific data
1556 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1557 *
1558 * @dev_addr: Hw address (before bcast,
1559 * because most packets are unicast)
1560 *
1561 * @_rx: Array of RX queues
1562 * @num_rx_queues: Number of RX queues
1563 * allocated at register_netdev() time
1564 * @real_num_rx_queues: Number of RX queues currently active in device
1565 *
1566 * @rx_handler: handler for received packets
1567 * @rx_handler_data: XXX: need comments on this one
1568 * @ingress_queue: XXX: need comments on this one
1569 * @broadcast: hw bcast address
1570 *
1571 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1572 * indexed by RX queue number. Assigned by driver.
1573 * This must only be set if the ndo_rx_flow_steer
1574 * operation is defined
1575 * @index_hlist: Device index hash chain
1576 *
1577 * @_tx: Array of TX queues
1578 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1579 * @real_num_tx_queues: Number of TX queues currently active in device
1580 * @qdisc: Root qdisc from userspace point of view
1581 * @tx_queue_len: Max frames per queue allowed
1582 * @tx_global_lock: XXX: need comments on this one
1583 *
1584 * @xps_maps: XXX: need comments on this one
1585 *
1586 * @watchdog_timeo: Represents the timeout that is used by
1587 * the watchdog (see dev_watchdog())
1588 * @watchdog_timer: List of timers
1589 *
1590 * @pcpu_refcnt: Number of references to this device
1591 * @todo_list: Delayed register/unregister
1592 * @link_watch_list: XXX: need comments on this one
1593 *
1594 * @reg_state: Register/unregister state machine
1595 * @dismantle: Device is going to be freed
1596 * @rtnl_link_state: This enum represents the phases of creating
1597 * a new link
1598 *
1599 * @destructor: Called from unregister,
1600 * can be used to call free_netdev
1601 * @npinfo: XXX: need comments on this one
1602 * @nd_net: Network namespace this network device is inside
1603 *
1604 * @ml_priv: Mid-layer private
1605 * @lstats: Loopback statistics
1606 * @tstats: Tunnel statistics
1607 * @dstats: Dummy statistics
1608 * @vstats: Virtual ethernet statistics
1609 *
1610 * @garp_port: GARP
1611 * @mrp_port: MRP
1612 *
1613 * @dev: Class/net/name entry
1614 * @sysfs_groups: Space for optional device, statistics and wireless
1615 * sysfs groups
1616 *
1617 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1618 * @rtnl_link_ops: Rtnl_link_ops
1619 *
1620 * @gso_max_size: Maximum size of generic segmentation offload
1621 * @gso_max_segs: Maximum number of segments that can be passed to the
1622 * NIC for GSO
1623 *
1624 * @dcbnl_ops: Data Center Bridging netlink ops
1625 * @num_tc: Number of traffic classes in the net device
1626 * @tc_to_txq: XXX: need comments on this one
1627 * @prio_tc_map: XXX: need comments on this one
1628 *
1629 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1630 *
1631 * @priomap: XXX: need comments on this one
1632 * @phydev: Physical device may attach itself
1633 * for hardware timestamping
1634 *
1635 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1636 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1637 *
1638 * @proto_down: protocol port state information can be sent to the
1639 * switch driver and used to set the phys state of the
1640 * switch port.
1641 *
1642 * FIXME: cleanup struct net_device such that network protocol info
1643 * moves out.
1644 */
1645
1646 struct net_device {
1647 char name[IFNAMSIZ];
1648 struct hlist_node name_hlist;
1649 char *ifalias;
1650 /*
1651 * I/O specific fields
1652 * FIXME: Merge these and struct ifmap into one
1653 */
1654 unsigned long mem_end;
1655 unsigned long mem_start;
1656 unsigned long base_addr;
1657 int irq;
1658
1659 atomic_t carrier_changes;
1660
1661 /*
1662 * Some hardware also needs these fields (state,dev_list,
1663 * napi_list,unreg_list,close_list) but they are not
1664 * part of the usual set specified in Space.c.
1665 */
1666
1667 unsigned long state;
1668
1669 struct list_head dev_list;
1670 struct list_head napi_list;
1671 struct list_head unreg_list;
1672 struct list_head close_list;
1673 struct list_head ptype_all;
1674 struct list_head ptype_specific;
1675
1676 struct {
1677 struct list_head upper;
1678 struct list_head lower;
1679 } adj_list;
1680
1681 netdev_features_t features;
1682 netdev_features_t hw_features;
1683 netdev_features_t wanted_features;
1684 netdev_features_t vlan_features;
1685 netdev_features_t hw_enc_features;
1686 netdev_features_t mpls_features;
1687 netdev_features_t gso_partial_features;
1688
1689 int ifindex;
1690 int group;
1691
1692 struct net_device_stats stats;
1693
1694 atomic_long_t rx_dropped;
1695 atomic_long_t tx_dropped;
1696 atomic_long_t rx_nohandler;
1697
1698 #ifdef CONFIG_WIRELESS_EXT
1699 const struct iw_handler_def *wireless_handlers;
1700 struct iw_public_data *wireless_data;
1701 #endif
1702 const struct net_device_ops *netdev_ops;
1703 const struct ethtool_ops *ethtool_ops;
1704 #ifdef CONFIG_NET_SWITCHDEV
1705 const struct switchdev_ops *switchdev_ops;
1706 #endif
1707 #ifdef CONFIG_NET_L3_MASTER_DEV
1708 const struct l3mdev_ops *l3mdev_ops;
1709 #endif
1710 #if IS_ENABLED(CONFIG_IPV6)
1711 const struct ndisc_ops *ndisc_ops;
1712 #endif
1713
1714 #ifdef CONFIG_XFRM
1715 const struct xfrmdev_ops *xfrmdev_ops;
1716 #endif
1717
1718 const struct header_ops *header_ops;
1719
1720 unsigned int flags;
1721 unsigned int priv_flags;
1722
1723 unsigned short gflags;
1724 unsigned short padded;
1725
1726 unsigned char operstate;
1727 unsigned char link_mode;
1728
1729 unsigned char if_port;
1730 unsigned char dma;
1731
1732 unsigned int mtu;
1733 unsigned int min_mtu;
1734 unsigned int max_mtu;
1735 unsigned short type;
1736 unsigned short hard_header_len;
1737 unsigned char min_header_len;
1738
1739 unsigned short needed_headroom;
1740 unsigned short needed_tailroom;
1741
1742 /* Interface address info. */
1743 unsigned char perm_addr[MAX_ADDR_LEN];
1744 unsigned char addr_assign_type;
1745 unsigned char addr_len;
1746 unsigned short neigh_priv_len;
1747 unsigned short dev_id;
1748 unsigned short dev_port;
1749 spinlock_t addr_list_lock;
1750 unsigned char name_assign_type;
1751 bool uc_promisc;
1752 struct netdev_hw_addr_list uc;
1753 struct netdev_hw_addr_list mc;
1754 struct netdev_hw_addr_list dev_addrs;
1755
1756 #ifdef CONFIG_SYSFS
1757 struct kset *queues_kset;
1758 #endif
1759 unsigned int promiscuity;
1760 unsigned int allmulti;
1761
1762
1763 /* Protocol-specific pointers */
1764
1765 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1766 struct vlan_info __rcu *vlan_info;
1767 #endif
1768 #if IS_ENABLED(CONFIG_NET_DSA)
1769 struct dsa_switch_tree *dsa_ptr;
1770 #endif
1771 #if IS_ENABLED(CONFIG_TIPC)
1772 struct tipc_bearer __rcu *tipc_ptr;
1773 #endif
1774 void *atalk_ptr;
1775 struct in_device __rcu *ip_ptr;
1776 struct dn_dev __rcu *dn_ptr;
1777 struct inet6_dev __rcu *ip6_ptr;
1778 void *ax25_ptr;
1779 struct wireless_dev *ieee80211_ptr;
1780 struct wpan_dev *ieee802154_ptr;
1781 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
1782 struct mpls_dev __rcu *mpls_ptr;
1783 #endif
1784
1785 /*
1786 * Cache lines mostly used on receive path (including eth_type_trans())
1787 */
1788 /* Interface address info used in eth_type_trans() */
1789 unsigned char *dev_addr;
1790
1791 #ifdef CONFIG_SYSFS
1792 struct netdev_rx_queue *_rx;
1793
1794 unsigned int num_rx_queues;
1795 unsigned int real_num_rx_queues;
1796 #endif
1797
1798 struct bpf_prog __rcu *xdp_prog;
1799 unsigned long gro_flush_timeout;
1800 rx_handler_func_t __rcu *rx_handler;
1801 void __rcu *rx_handler_data;
1802
1803 #ifdef CONFIG_NET_CLS_ACT
1804 struct tcf_proto __rcu *ingress_cl_list;
1805 #endif
1806 struct netdev_queue __rcu *ingress_queue;
1807 #ifdef CONFIG_NETFILTER_INGRESS
1808 struct nf_hook_entry __rcu *nf_hooks_ingress;
1809 #endif
1810
1811 unsigned char broadcast[MAX_ADDR_LEN];
1812 #ifdef CONFIG_RFS_ACCEL
1813 struct cpu_rmap *rx_cpu_rmap;
1814 #endif
1815 struct hlist_node index_hlist;
1816
1817 /*
1818 * Cache lines mostly used on transmit path
1819 */
1820 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1821 unsigned int num_tx_queues;
1822 unsigned int real_num_tx_queues;
1823 struct Qdisc *qdisc;
1824 #ifdef CONFIG_NET_SCHED
1825 DECLARE_HASHTABLE (qdisc_hash, 4);
1826 #endif
1827 unsigned long tx_queue_len;
1828 spinlock_t tx_global_lock;
1829 int watchdog_timeo;
1830
1831 #ifdef CONFIG_XPS
1832 struct xps_dev_maps __rcu *xps_maps;
1833 #endif
1834 #ifdef CONFIG_NET_CLS_ACT
1835 struct tcf_proto __rcu *egress_cl_list;
1836 #endif
1837
1838 /* These may be needed for future network-power-down code. */
1839 struct timer_list watchdog_timer;
1840
1841 int __percpu *pcpu_refcnt;
1842 struct list_head todo_list;
1843
1844 struct list_head link_watch_list;
1845
1846 enum { NETREG_UNINITIALIZED=0,
1847 NETREG_REGISTERED, /* completed register_netdevice */
1848 NETREG_UNREGISTERING, /* called unregister_netdevice */
1849 NETREG_UNREGISTERED, /* completed unregister todo */
1850 NETREG_RELEASED, /* called free_netdev */
1851 NETREG_DUMMY, /* dummy device for NAPI poll */
1852 } reg_state:8;
1853
1854 bool dismantle;
1855
1856 enum {
1857 RTNL_LINK_INITIALIZED,
1858 RTNL_LINK_INITIALIZING,
1859 } rtnl_link_state:16;
1860
1861 void (*destructor)(struct net_device *dev);
1862
1863 #ifdef CONFIG_NETPOLL
1864 struct netpoll_info __rcu *npinfo;
1865 #endif
1866
1867 possible_net_t nd_net;
1868
1869 /* mid-layer private */
1870 union {
1871 void *ml_priv;
1872 struct pcpu_lstats __percpu *lstats;
1873 struct pcpu_sw_netstats __percpu *tstats;
1874 struct pcpu_dstats __percpu *dstats;
1875 struct pcpu_vstats __percpu *vstats;
1876 };
1877
1878 #if IS_ENABLED(CONFIG_GARP)
1879 struct garp_port __rcu *garp_port;
1880 #endif
1881 #if IS_ENABLED(CONFIG_MRP)
1882 struct mrp_port __rcu *mrp_port;
1883 #endif
1884
1885 struct device dev;
1886 const struct attribute_group *sysfs_groups[4];
1887 const struct attribute_group *sysfs_rx_queue_group;
1888
1889 const struct rtnl_link_ops *rtnl_link_ops;
1890
1891 /* for setting kernel sock attribute on TCP connection setup */
1892 #define GSO_MAX_SIZE 65536
1893 unsigned int gso_max_size;
1894 #define GSO_MAX_SEGS 65535
1895 u16 gso_max_segs;
1896
1897 #ifdef CONFIG_DCB
1898 const struct dcbnl_rtnl_ops *dcbnl_ops;
1899 #endif
1900 u8 num_tc;
1901 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1902 u8 prio_tc_map[TC_BITMASK + 1];
1903
1904 #if IS_ENABLED(CONFIG_FCOE)
1905 unsigned int fcoe_ddp_xid;
1906 #endif
1907 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1908 struct netprio_map __rcu *priomap;
1909 #endif
1910 struct phy_device *phydev;
1911 struct lock_class_key *qdisc_tx_busylock;
1912 struct lock_class_key *qdisc_running_key;
1913 bool proto_down;
1914 };
1915 #define to_net_dev(d) container_of(d, struct net_device, dev)
1916
1917 static inline bool netif_elide_gro(const struct net_device *dev)
1918 {
1919 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
1920 return true;
1921 return false;
1922 }
1923
1924 #define NETDEV_ALIGN 32
1925
1926 static inline
1927 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1928 {
1929 return dev->prio_tc_map[prio & TC_BITMASK];
1930 }
1931
1932 static inline
1933 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1934 {
1935 if (tc >= dev->num_tc)
1936 return -EINVAL;
1937
1938 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1939 return 0;
1940 }
1941
1942 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
1943 void netdev_reset_tc(struct net_device *dev);
1944 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
1945 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
1946
1947 static inline
1948 int netdev_get_num_tc(struct net_device *dev)
1949 {
1950 return dev->num_tc;
1951 }
1952
1953 static inline
1954 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1955 unsigned int index)
1956 {
1957 return &dev->_tx[index];
1958 }
1959
1960 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
1961 const struct sk_buff *skb)
1962 {
1963 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
1964 }
1965
1966 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1967 void (*f)(struct net_device *,
1968 struct netdev_queue *,
1969 void *),
1970 void *arg)
1971 {
1972 unsigned int i;
1973
1974 for (i = 0; i < dev->num_tx_queues; i++)
1975 f(dev, &dev->_tx[i], arg);
1976 }
1977
1978 #define netdev_lockdep_set_classes(dev) \
1979 { \
1980 static struct lock_class_key qdisc_tx_busylock_key; \
1981 static struct lock_class_key qdisc_running_key; \
1982 static struct lock_class_key qdisc_xmit_lock_key; \
1983 static struct lock_class_key dev_addr_list_lock_key; \
1984 unsigned int i; \
1985 \
1986 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
1987 (dev)->qdisc_running_key = &qdisc_running_key; \
1988 lockdep_set_class(&(dev)->addr_list_lock, \
1989 &dev_addr_list_lock_key); \
1990 for (i = 0; i < (dev)->num_tx_queues; i++) \
1991 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
1992 &qdisc_xmit_lock_key); \
1993 }
1994
1995 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1996 struct sk_buff *skb,
1997 void *accel_priv);
1998
1999 /* returns the headroom that the master device needs to take in account
2000 * when forwarding to this dev
2001 */
2002 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2003 {
2004 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2005 }
2006
2007 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2008 {
2009 if (dev->netdev_ops->ndo_set_rx_headroom)
2010 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2011 }
2012
2013 /* set the device rx headroom to the dev's default */
2014 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2015 {
2016 netdev_set_rx_headroom(dev, -1);
2017 }
2018
2019 /*
2020 * Net namespace inlines
2021 */
2022 static inline
2023 struct net *dev_net(const struct net_device *dev)
2024 {
2025 return read_pnet(&dev->nd_net);
2026 }
2027
2028 static inline
2029 void dev_net_set(struct net_device *dev, struct net *net)
2030 {
2031 write_pnet(&dev->nd_net, net);
2032 }
2033
2034 /**
2035 * netdev_priv - access network device private data
2036 * @dev: network device
2037 *
2038 * Get network device private data
2039 */
2040 static inline void *netdev_priv(const struct net_device *dev)
2041 {
2042 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2043 }
2044
2045 /* Set the sysfs physical device reference for the network logical device
2046 * if set prior to registration will cause a symlink during initialization.
2047 */
2048 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2049
2050 /* Set the sysfs device type for the network logical device to allow
2051 * fine-grained identification of different network device types. For
2052 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2053 */
2054 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2055
2056 /* Default NAPI poll() weight
2057 * Device drivers are strongly advised to not use bigger value
2058 */
2059 #define NAPI_POLL_WEIGHT 64
2060
2061 /**
2062 * netif_napi_add - initialize a NAPI context
2063 * @dev: network device
2064 * @napi: NAPI context
2065 * @poll: polling function
2066 * @weight: default weight
2067 *
2068 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2069 * *any* of the other NAPI-related functions.
2070 */
2071 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2072 int (*poll)(struct napi_struct *, int), int weight);
2073
2074 /**
2075 * netif_tx_napi_add - initialize a NAPI context
2076 * @dev: network device
2077 * @napi: NAPI context
2078 * @poll: polling function
2079 * @weight: default weight
2080 *
2081 * This variant of netif_napi_add() should be used from drivers using NAPI
2082 * to exclusively poll a TX queue.
2083 * This will avoid we add it into napi_hash[], thus polluting this hash table.
2084 */
2085 static inline void netif_tx_napi_add(struct net_device *dev,
2086 struct napi_struct *napi,
2087 int (*poll)(struct napi_struct *, int),
2088 int weight)
2089 {
2090 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2091 netif_napi_add(dev, napi, poll, weight);
2092 }
2093
2094 /**
2095 * netif_napi_del - remove a NAPI context
2096 * @napi: NAPI context
2097 *
2098 * netif_napi_del() removes a NAPI context from the network device NAPI list
2099 */
2100 void netif_napi_del(struct napi_struct *napi);
2101
2102 struct napi_gro_cb {
2103 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2104 void *frag0;
2105
2106 /* Length of frag0. */
2107 unsigned int frag0_len;
2108
2109 /* This indicates where we are processing relative to skb->data. */
2110 int data_offset;
2111
2112 /* This is non-zero if the packet cannot be merged with the new skb. */
2113 u16 flush;
2114
2115 /* Save the IP ID here and check when we get to the transport layer */
2116 u16 flush_id;
2117
2118 /* Number of segments aggregated. */
2119 u16 count;
2120
2121 /* Start offset for remote checksum offload */
2122 u16 gro_remcsum_start;
2123
2124 /* jiffies when first packet was created/queued */
2125 unsigned long age;
2126
2127 /* Used in ipv6_gro_receive() and foo-over-udp */
2128 u16 proto;
2129
2130 /* This is non-zero if the packet may be of the same flow. */
2131 u8 same_flow:1;
2132
2133 /* Used in tunnel GRO receive */
2134 u8 encap_mark:1;
2135
2136 /* GRO checksum is valid */
2137 u8 csum_valid:1;
2138
2139 /* Number of checksums via CHECKSUM_UNNECESSARY */
2140 u8 csum_cnt:3;
2141
2142 /* Free the skb? */
2143 u8 free:2;
2144 #define NAPI_GRO_FREE 1
2145 #define NAPI_GRO_FREE_STOLEN_HEAD 2
2146
2147 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2148 u8 is_ipv6:1;
2149
2150 /* Used in GRE, set in fou/gue_gro_receive */
2151 u8 is_fou:1;
2152
2153 /* Used to determine if flush_id can be ignored */
2154 u8 is_atomic:1;
2155
2156 /* Number of gro_receive callbacks this packet already went through */
2157 u8 recursion_counter:4;
2158
2159 /* 1 bit hole */
2160
2161 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2162 __wsum csum;
2163
2164 /* used in skb_gro_receive() slow path */
2165 struct sk_buff *last;
2166 };
2167
2168 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2169
2170 #define GRO_RECURSION_LIMIT 15
2171 static inline int gro_recursion_inc_test(struct sk_buff *skb)
2172 {
2173 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2174 }
2175
2176 typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
2177 static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
2178 struct sk_buff **head,
2179 struct sk_buff *skb)
2180 {
2181 if (unlikely(gro_recursion_inc_test(skb))) {
2182 NAPI_GRO_CB(skb)->flush |= 1;
2183 return NULL;
2184 }
2185
2186 return cb(head, skb);
2187 }
2188
2189 typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
2190 struct sk_buff *);
2191 static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
2192 struct sock *sk,
2193 struct sk_buff **head,
2194 struct sk_buff *skb)
2195 {
2196 if (unlikely(gro_recursion_inc_test(skb))) {
2197 NAPI_GRO_CB(skb)->flush |= 1;
2198 return NULL;
2199 }
2200
2201 return cb(sk, head, skb);
2202 }
2203
2204 struct packet_type {
2205 __be16 type; /* This is really htons(ether_type). */
2206 struct net_device *dev; /* NULL is wildcarded here */
2207 int (*func) (struct sk_buff *,
2208 struct net_device *,
2209 struct packet_type *,
2210 struct net_device *);
2211 bool (*id_match)(struct packet_type *ptype,
2212 struct sock *sk);
2213 void *af_packet_priv;
2214 struct list_head list;
2215 };
2216
2217 struct offload_callbacks {
2218 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2219 netdev_features_t features);
2220 struct sk_buff **(*gro_receive)(struct sk_buff **head,
2221 struct sk_buff *skb);
2222 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2223 };
2224
2225 struct packet_offload {
2226 __be16 type; /* This is really htons(ether_type). */
2227 u16 priority;
2228 struct offload_callbacks callbacks;
2229 struct list_head list;
2230 };
2231
2232 /* often modified stats are per-CPU, other are shared (netdev->stats) */
2233 struct pcpu_sw_netstats {
2234 u64 rx_packets;
2235 u64 rx_bytes;
2236 u64 tx_packets;
2237 u64 tx_bytes;
2238 struct u64_stats_sync syncp;
2239 };
2240
2241 #define __netdev_alloc_pcpu_stats(type, gfp) \
2242 ({ \
2243 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2244 if (pcpu_stats) { \
2245 int __cpu; \
2246 for_each_possible_cpu(__cpu) { \
2247 typeof(type) *stat; \
2248 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2249 u64_stats_init(&stat->syncp); \
2250 } \
2251 } \
2252 pcpu_stats; \
2253 })
2254
2255 #define netdev_alloc_pcpu_stats(type) \
2256 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2257
2258 enum netdev_lag_tx_type {
2259 NETDEV_LAG_TX_TYPE_UNKNOWN,
2260 NETDEV_LAG_TX_TYPE_RANDOM,
2261 NETDEV_LAG_TX_TYPE_BROADCAST,
2262 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2263 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2264 NETDEV_LAG_TX_TYPE_HASH,
2265 };
2266
2267 struct netdev_lag_upper_info {
2268 enum netdev_lag_tx_type tx_type;
2269 };
2270
2271 struct netdev_lag_lower_state_info {
2272 u8 link_up : 1,
2273 tx_enabled : 1;
2274 };
2275
2276 #include <linux/notifier.h>
2277
2278 /* netdevice notifier chain. Please remember to update the rtnetlink
2279 * notification exclusion list in rtnetlink_event() when adding new
2280 * types.
2281 */
2282 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
2283 #define NETDEV_DOWN 0x0002
2284 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
2285 detected a hardware crash and restarted
2286 - we can use this eg to kick tcp sessions
2287 once done */
2288 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
2289 #define NETDEV_REGISTER 0x0005
2290 #define NETDEV_UNREGISTER 0x0006
2291 #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
2292 #define NETDEV_CHANGEADDR 0x0008
2293 #define NETDEV_GOING_DOWN 0x0009
2294 #define NETDEV_CHANGENAME 0x000A
2295 #define NETDEV_FEAT_CHANGE 0x000B
2296 #define NETDEV_BONDING_FAILOVER 0x000C
2297 #define NETDEV_PRE_UP 0x000D
2298 #define NETDEV_PRE_TYPE_CHANGE 0x000E
2299 #define NETDEV_POST_TYPE_CHANGE 0x000F
2300 #define NETDEV_POST_INIT 0x0010
2301 #define NETDEV_UNREGISTER_FINAL 0x0011
2302 #define NETDEV_RELEASE 0x0012
2303 #define NETDEV_NOTIFY_PEERS 0x0013
2304 #define NETDEV_JOIN 0x0014
2305 #define NETDEV_CHANGEUPPER 0x0015
2306 #define NETDEV_RESEND_IGMP 0x0016
2307 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
2308 #define NETDEV_CHANGEINFODATA 0x0018
2309 #define NETDEV_BONDING_INFO 0x0019
2310 #define NETDEV_PRECHANGEUPPER 0x001A
2311 #define NETDEV_CHANGELOWERSTATE 0x001B
2312 #define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
2313 #define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
2314
2315 int register_netdevice_notifier(struct notifier_block *nb);
2316 int unregister_netdevice_notifier(struct notifier_block *nb);
2317
2318 struct netdev_notifier_info {
2319 struct net_device *dev;
2320 };
2321
2322 struct netdev_notifier_change_info {
2323 struct netdev_notifier_info info; /* must be first */
2324 unsigned int flags_changed;
2325 };
2326
2327 struct netdev_notifier_changeupper_info {
2328 struct netdev_notifier_info info; /* must be first */
2329 struct net_device *upper_dev; /* new upper dev */
2330 bool master; /* is upper dev master */
2331 bool linking; /* is the notification for link or unlink */
2332 void *upper_info; /* upper dev info */
2333 };
2334
2335 struct netdev_notifier_changelowerstate_info {
2336 struct netdev_notifier_info info; /* must be first */
2337 void *lower_state_info; /* is lower dev state */
2338 };
2339
2340 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2341 struct net_device *dev)
2342 {
2343 info->dev = dev;
2344 }
2345
2346 static inline struct net_device *
2347 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2348 {
2349 return info->dev;
2350 }
2351
2352 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
2353
2354
2355 extern rwlock_t dev_base_lock; /* Device list lock */
2356
2357 #define for_each_netdev(net, d) \
2358 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2359 #define for_each_netdev_reverse(net, d) \
2360 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2361 #define for_each_netdev_rcu(net, d) \
2362 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2363 #define for_each_netdev_safe(net, d, n) \
2364 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2365 #define for_each_netdev_continue(net, d) \
2366 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2367 #define for_each_netdev_continue_rcu(net, d) \
2368 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2369 #define for_each_netdev_in_bond_rcu(bond, slave) \
2370 for_each_netdev_rcu(&init_net, slave) \
2371 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2372 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2373
2374 static inline struct net_device *next_net_device(struct net_device *dev)
2375 {
2376 struct list_head *lh;
2377 struct net *net;
2378
2379 net = dev_net(dev);
2380 lh = dev->dev_list.next;
2381 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2382 }
2383
2384 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2385 {
2386 struct list_head *lh;
2387 struct net *net;
2388
2389 net = dev_net(dev);
2390 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2391 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2392 }
2393
2394 static inline struct net_device *first_net_device(struct net *net)
2395 {
2396 return list_empty(&net->dev_base_head) ? NULL :
2397 net_device_entry(net->dev_base_head.next);
2398 }
2399
2400 static inline struct net_device *first_net_device_rcu(struct net *net)
2401 {
2402 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2403
2404 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2405 }
2406
2407 int netdev_boot_setup_check(struct net_device *dev);
2408 unsigned long netdev_boot_base(const char *prefix, int unit);
2409 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2410 const char *hwaddr);
2411 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2412 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2413 void dev_add_pack(struct packet_type *pt);
2414 void dev_remove_pack(struct packet_type *pt);
2415 void __dev_remove_pack(struct packet_type *pt);
2416 void dev_add_offload(struct packet_offload *po);
2417 void dev_remove_offload(struct packet_offload *po);
2418
2419 int dev_get_iflink(const struct net_device *dev);
2420 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2421 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2422 unsigned short mask);
2423 struct net_device *dev_get_by_name(struct net *net, const char *name);
2424 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2425 struct net_device *__dev_get_by_name(struct net *net, const char *name);
2426 int dev_alloc_name(struct net_device *dev, const char *name);
2427 int dev_open(struct net_device *dev);
2428 int dev_close(struct net_device *dev);
2429 int dev_close_many(struct list_head *head, bool unlink);
2430 void dev_disable_lro(struct net_device *dev);
2431 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2432 int dev_queue_xmit(struct sk_buff *skb);
2433 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
2434 int register_netdevice(struct net_device *dev);
2435 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2436 void unregister_netdevice_many(struct list_head *head);
2437 static inline void unregister_netdevice(struct net_device *dev)
2438 {
2439 unregister_netdevice_queue(dev, NULL);
2440 }
2441
2442 int netdev_refcnt_read(const struct net_device *dev);
2443 void free_netdev(struct net_device *dev);
2444 void netdev_freemem(struct net_device *dev);
2445 void synchronize_net(void);
2446 int init_dummy_netdev(struct net_device *dev);
2447
2448 DECLARE_PER_CPU(int, xmit_recursion);
2449 #define XMIT_RECURSION_LIMIT 10
2450
2451 static inline int dev_recursion_level(void)
2452 {
2453 return this_cpu_read(xmit_recursion);
2454 }
2455
2456 struct net_device *dev_get_by_index(struct net *net, int ifindex);
2457 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2458 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2459 int netdev_get_name(struct net *net, char *name, int ifindex);
2460 int dev_restart(struct net_device *dev);
2461 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
2462
2463 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2464 {
2465 return NAPI_GRO_CB(skb)->data_offset;
2466 }
2467
2468 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2469 {
2470 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2471 }
2472
2473 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2474 {
2475 NAPI_GRO_CB(skb)->data_offset += len;
2476 }
2477
2478 static inline void *skb_gro_header_fast(struct sk_buff *skb,
2479 unsigned int offset)
2480 {
2481 return NAPI_GRO_CB(skb)->frag0 + offset;
2482 }
2483
2484 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2485 {
2486 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2487 }
2488
2489 static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2490 {
2491 NAPI_GRO_CB(skb)->frag0 = NULL;
2492 NAPI_GRO_CB(skb)->frag0_len = 0;
2493 }
2494
2495 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2496 unsigned int offset)
2497 {
2498 if (!pskb_may_pull(skb, hlen))
2499 return NULL;
2500
2501 skb_gro_frag0_invalidate(skb);
2502 return skb->data + offset;
2503 }
2504
2505 static inline void *skb_gro_network_header(struct sk_buff *skb)
2506 {
2507 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2508 skb_network_offset(skb);
2509 }
2510
2511 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2512 const void *start, unsigned int len)
2513 {
2514 if (NAPI_GRO_CB(skb)->csum_valid)
2515 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2516 csum_partial(start, len, 0));
2517 }
2518
2519 /* GRO checksum functions. These are logical equivalents of the normal
2520 * checksum functions (in skbuff.h) except that they operate on the GRO
2521 * offsets and fields in sk_buff.
2522 */
2523
2524 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2525
2526 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2527 {
2528 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2529 }
2530
2531 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2532 bool zero_okay,
2533 __sum16 check)
2534 {
2535 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2536 skb_checksum_start_offset(skb) <
2537 skb_gro_offset(skb)) &&
2538 !skb_at_gro_remcsum_start(skb) &&
2539 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2540 (!zero_okay || check));
2541 }
2542
2543 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2544 __wsum psum)
2545 {
2546 if (NAPI_GRO_CB(skb)->csum_valid &&
2547 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2548 return 0;
2549
2550 NAPI_GRO_CB(skb)->csum = psum;
2551
2552 return __skb_gro_checksum_complete(skb);
2553 }
2554
2555 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2556 {
2557 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2558 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2559 NAPI_GRO_CB(skb)->csum_cnt--;
2560 } else {
2561 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2562 * verified a new top level checksum or an encapsulated one
2563 * during GRO. This saves work if we fallback to normal path.
2564 */
2565 __skb_incr_checksum_unnecessary(skb);
2566 }
2567 }
2568
2569 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2570 compute_pseudo) \
2571 ({ \
2572 __sum16 __ret = 0; \
2573 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2574 __ret = __skb_gro_checksum_validate_complete(skb, \
2575 compute_pseudo(skb, proto)); \
2576 if (__ret) \
2577 __skb_mark_checksum_bad(skb); \
2578 else \
2579 skb_gro_incr_csum_unnecessary(skb); \
2580 __ret; \
2581 })
2582
2583 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2584 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2585
2586 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2587 compute_pseudo) \
2588 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2589
2590 #define skb_gro_checksum_simple_validate(skb) \
2591 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2592
2593 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2594 {
2595 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2596 !NAPI_GRO_CB(skb)->csum_valid);
2597 }
2598
2599 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2600 __sum16 check, __wsum pseudo)
2601 {
2602 NAPI_GRO_CB(skb)->csum = ~pseudo;
2603 NAPI_GRO_CB(skb)->csum_valid = 1;
2604 }
2605
2606 #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2607 do { \
2608 if (__skb_gro_checksum_convert_check(skb)) \
2609 __skb_gro_checksum_convert(skb, check, \
2610 compute_pseudo(skb, proto)); \
2611 } while (0)
2612
2613 struct gro_remcsum {
2614 int offset;
2615 __wsum delta;
2616 };
2617
2618 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2619 {
2620 grc->offset = 0;
2621 grc->delta = 0;
2622 }
2623
2624 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2625 unsigned int off, size_t hdrlen,
2626 int start, int offset,
2627 struct gro_remcsum *grc,
2628 bool nopartial)
2629 {
2630 __wsum delta;
2631 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2632
2633 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2634
2635 if (!nopartial) {
2636 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2637 return ptr;
2638 }
2639
2640 ptr = skb_gro_header_fast(skb, off);
2641 if (skb_gro_header_hard(skb, off + plen)) {
2642 ptr = skb_gro_header_slow(skb, off + plen, off);
2643 if (!ptr)
2644 return NULL;
2645 }
2646
2647 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2648 start, offset);
2649
2650 /* Adjust skb->csum since we changed the packet */
2651 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2652
2653 grc->offset = off + hdrlen + offset;
2654 grc->delta = delta;
2655
2656 return ptr;
2657 }
2658
2659 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2660 struct gro_remcsum *grc)
2661 {
2662 void *ptr;
2663 size_t plen = grc->offset + sizeof(u16);
2664
2665 if (!grc->delta)
2666 return;
2667
2668 ptr = skb_gro_header_fast(skb, grc->offset);
2669 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2670 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2671 if (!ptr)
2672 return;
2673 }
2674
2675 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2676 }
2677
2678 #ifdef CONFIG_XFRM_OFFLOAD
2679 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
2680 {
2681 if (PTR_ERR(pp) != -EINPROGRESS)
2682 NAPI_GRO_CB(skb)->flush |= flush;
2683 }
2684 #else
2685 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
2686 {
2687 NAPI_GRO_CB(skb)->flush |= flush;
2688 }
2689 #endif
2690
2691 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2692 unsigned short type,
2693 const void *daddr, const void *saddr,
2694 unsigned int len)
2695 {
2696 if (!dev->header_ops || !dev->header_ops->create)
2697 return 0;
2698
2699 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2700 }
2701
2702 static inline int dev_parse_header(const struct sk_buff *skb,
2703 unsigned char *haddr)
2704 {
2705 const struct net_device *dev = skb->dev;
2706
2707 if (!dev->header_ops || !dev->header_ops->parse)
2708 return 0;
2709 return dev->header_ops->parse(skb, haddr);
2710 }
2711
2712 /* ll_header must have at least hard_header_len allocated */
2713 static inline bool dev_validate_header(const struct net_device *dev,
2714 char *ll_header, int len)
2715 {
2716 if (likely(len >= dev->hard_header_len))
2717 return true;
2718 if (len < dev->min_header_len)
2719 return false;
2720
2721 if (capable(CAP_SYS_RAWIO)) {
2722 memset(ll_header + len, 0, dev->hard_header_len - len);
2723 return true;
2724 }
2725
2726 if (dev->header_ops && dev->header_ops->validate)
2727 return dev->header_ops->validate(ll_header, len);
2728
2729 return false;
2730 }
2731
2732 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2733 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2734 static inline int unregister_gifconf(unsigned int family)
2735 {
2736 return register_gifconf(family, NULL);
2737 }
2738
2739 #ifdef CONFIG_NET_FLOW_LIMIT
2740 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
2741 struct sd_flow_limit {
2742 u64 count;
2743 unsigned int num_buckets;
2744 unsigned int history_head;
2745 u16 history[FLOW_LIMIT_HISTORY];
2746 u8 buckets[];
2747 };
2748
2749 extern int netdev_flow_limit_table_len;
2750 #endif /* CONFIG_NET_FLOW_LIMIT */
2751
2752 /*
2753 * Incoming packets are placed on per-CPU queues
2754 */
2755 struct softnet_data {
2756 struct list_head poll_list;
2757 struct sk_buff_head process_queue;
2758
2759 /* stats */
2760 unsigned int processed;
2761 unsigned int time_squeeze;
2762 unsigned int received_rps;
2763 #ifdef CONFIG_RPS
2764 struct softnet_data *rps_ipi_list;
2765 #endif
2766 #ifdef CONFIG_NET_FLOW_LIMIT
2767 struct sd_flow_limit __rcu *flow_limit;
2768 #endif
2769 struct Qdisc *output_queue;
2770 struct Qdisc **output_queue_tailp;
2771 struct sk_buff *completion_queue;
2772
2773 #ifdef CONFIG_RPS
2774 /* input_queue_head should be written by cpu owning this struct,
2775 * and only read by other cpus. Worth using a cache line.
2776 */
2777 unsigned int input_queue_head ____cacheline_aligned_in_smp;
2778
2779 /* Elements below can be accessed between CPUs for RPS/RFS */
2780 struct call_single_data csd ____cacheline_aligned_in_smp;
2781 struct softnet_data *rps_ipi_next;
2782 unsigned int cpu;
2783 unsigned int input_queue_tail;
2784 #endif
2785 unsigned int dropped;
2786 struct sk_buff_head input_pkt_queue;
2787 struct napi_struct backlog;
2788
2789 };
2790
2791 static inline void input_queue_head_incr(struct softnet_data *sd)
2792 {
2793 #ifdef CONFIG_RPS
2794 sd->input_queue_head++;
2795 #endif
2796 }
2797
2798 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2799 unsigned int *qtail)
2800 {
2801 #ifdef CONFIG_RPS
2802 *qtail = ++sd->input_queue_tail;
2803 #endif
2804 }
2805
2806 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2807
2808 void __netif_schedule(struct Qdisc *q);
2809 void netif_schedule_queue(struct netdev_queue *txq);
2810
2811 static inline void netif_tx_schedule_all(struct net_device *dev)
2812 {
2813 unsigned int i;
2814
2815 for (i = 0; i < dev->num_tx_queues; i++)
2816 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2817 }
2818
2819 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2820 {
2821 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2822 }
2823
2824 /**
2825 * netif_start_queue - allow transmit
2826 * @dev: network device
2827 *
2828 * Allow upper layers to call the device hard_start_xmit routine.
2829 */
2830 static inline void netif_start_queue(struct net_device *dev)
2831 {
2832 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
2833 }
2834
2835 static inline void netif_tx_start_all_queues(struct net_device *dev)
2836 {
2837 unsigned int i;
2838
2839 for (i = 0; i < dev->num_tx_queues; i++) {
2840 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2841 netif_tx_start_queue(txq);
2842 }
2843 }
2844
2845 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2846
2847 /**
2848 * netif_wake_queue - restart transmit
2849 * @dev: network device
2850 *
2851 * Allow upper layers to call the device hard_start_xmit routine.
2852 * Used for flow control when transmit resources are available.
2853 */
2854 static inline void netif_wake_queue(struct net_device *dev)
2855 {
2856 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2857 }
2858
2859 static inline void netif_tx_wake_all_queues(struct net_device *dev)
2860 {
2861 unsigned int i;
2862
2863 for (i = 0; i < dev->num_tx_queues; i++) {
2864 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2865 netif_tx_wake_queue(txq);
2866 }
2867 }
2868
2869 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2870 {
2871 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2872 }
2873
2874 /**
2875 * netif_stop_queue - stop transmitted packets
2876 * @dev: network device
2877 *
2878 * Stop upper layers calling the device hard_start_xmit routine.
2879 * Used for flow control when transmit resources are unavailable.
2880 */
2881 static inline void netif_stop_queue(struct net_device *dev)
2882 {
2883 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2884 }
2885
2886 void netif_tx_stop_all_queues(struct net_device *dev);
2887
2888 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2889 {
2890 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2891 }
2892
2893 /**
2894 * netif_queue_stopped - test if transmit queue is flowblocked
2895 * @dev: network device
2896 *
2897 * Test if transmit queue on device is currently unable to send.
2898 */
2899 static inline bool netif_queue_stopped(const struct net_device *dev)
2900 {
2901 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2902 }
2903
2904 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2905 {
2906 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2907 }
2908
2909 static inline bool
2910 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2911 {
2912 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2913 }
2914
2915 static inline bool
2916 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2917 {
2918 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2919 }
2920
2921 /**
2922 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
2923 * @dev_queue: pointer to transmit queue
2924 *
2925 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
2926 * to give appropriate hint to the CPU.
2927 */
2928 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2929 {
2930 #ifdef CONFIG_BQL
2931 prefetchw(&dev_queue->dql.num_queued);
2932 #endif
2933 }
2934
2935 /**
2936 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
2937 * @dev_queue: pointer to transmit queue
2938 *
2939 * BQL enabled drivers might use this helper in their TX completion path,
2940 * to give appropriate hint to the CPU.
2941 */
2942 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2943 {
2944 #ifdef CONFIG_BQL
2945 prefetchw(&dev_queue->dql.limit);
2946 #endif
2947 }
2948
2949 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2950 unsigned int bytes)
2951 {
2952 #ifdef CONFIG_BQL
2953 dql_queued(&dev_queue->dql, bytes);
2954
2955 if (likely(dql_avail(&dev_queue->dql) >= 0))
2956 return;
2957
2958 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2959
2960 /*
2961 * The XOFF flag must be set before checking the dql_avail below,
2962 * because in netdev_tx_completed_queue we update the dql_completed
2963 * before checking the XOFF flag.
2964 */
2965 smp_mb();
2966
2967 /* check again in case another CPU has just made room avail */
2968 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2969 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2970 #endif
2971 }
2972
2973 /**
2974 * netdev_sent_queue - report the number of bytes queued to hardware
2975 * @dev: network device
2976 * @bytes: number of bytes queued to the hardware device queue
2977 *
2978 * Report the number of bytes queued for sending/completion to the network
2979 * device hardware queue. @bytes should be a good approximation and should
2980 * exactly match netdev_completed_queue() @bytes
2981 */
2982 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2983 {
2984 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2985 }
2986
2987 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
2988 unsigned int pkts, unsigned int bytes)
2989 {
2990 #ifdef CONFIG_BQL
2991 if (unlikely(!bytes))
2992 return;
2993
2994 dql_completed(&dev_queue->dql, bytes);
2995
2996 /*
2997 * Without the memory barrier there is a small possiblity that
2998 * netdev_tx_sent_queue will miss the update and cause the queue to
2999 * be stopped forever
3000 */
3001 smp_mb();
3002
3003 if (dql_avail(&dev_queue->dql) < 0)
3004 return;
3005
3006 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3007 netif_schedule_queue(dev_queue);
3008 #endif
3009 }
3010
3011 /**
3012 * netdev_completed_queue - report bytes and packets completed by device
3013 * @dev: network device
3014 * @pkts: actual number of packets sent over the medium
3015 * @bytes: actual number of bytes sent over the medium
3016 *
3017 * Report the number of bytes and packets transmitted by the network device
3018 * hardware queue over the physical medium, @bytes must exactly match the
3019 * @bytes amount passed to netdev_sent_queue()
3020 */
3021 static inline void netdev_completed_queue(struct net_device *dev,
3022 unsigned int pkts, unsigned int bytes)
3023 {
3024 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3025 }
3026
3027 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3028 {
3029 #ifdef CONFIG_BQL
3030 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3031 dql_reset(&q->dql);
3032 #endif
3033 }
3034
3035 /**
3036 * netdev_reset_queue - reset the packets and bytes count of a network device
3037 * @dev_queue: network device
3038 *
3039 * Reset the bytes and packet count of a network device and clear the
3040 * software flow control OFF bit for this network device
3041 */
3042 static inline void netdev_reset_queue(struct net_device *dev_queue)
3043 {
3044 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3045 }
3046
3047 /**
3048 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3049 * @dev: network device
3050 * @queue_index: given tx queue index
3051 *
3052 * Returns 0 if given tx queue index >= number of device tx queues,
3053 * otherwise returns the originally passed tx queue index.
3054 */
3055 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3056 {
3057 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3058 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3059 dev->name, queue_index,
3060 dev->real_num_tx_queues);
3061 return 0;
3062 }
3063
3064 return queue_index;
3065 }
3066
3067 /**
3068 * netif_running - test if up
3069 * @dev: network device
3070 *
3071 * Test if the device has been brought up.
3072 */
3073 static inline bool netif_running(const struct net_device *dev)
3074 {
3075 return test_bit(__LINK_STATE_START, &dev->state);
3076 }
3077
3078 /*
3079 * Routines to manage the subqueues on a device. We only need start,
3080 * stop, and a check if it's stopped. All other device management is
3081 * done at the overall netdevice level.
3082 * Also test the device if we're multiqueue.
3083 */
3084
3085 /**
3086 * netif_start_subqueue - allow sending packets on subqueue
3087 * @dev: network device
3088 * @queue_index: sub queue index
3089 *
3090 * Start individual transmit queue of a device with multiple transmit queues.
3091 */
3092 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3093 {
3094 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3095
3096 netif_tx_start_queue(txq);
3097 }
3098
3099 /**
3100 * netif_stop_subqueue - stop sending packets on subqueue
3101 * @dev: network device
3102 * @queue_index: sub queue index
3103 *
3104 * Stop individual transmit queue of a device with multiple transmit queues.
3105 */
3106 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3107 {
3108 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3109 netif_tx_stop_queue(txq);
3110 }
3111
3112 /**
3113 * netif_subqueue_stopped - test status of subqueue
3114 * @dev: network device
3115 * @queue_index: sub queue index
3116 *
3117 * Check individual transmit queue of a device with multiple transmit queues.
3118 */
3119 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3120 u16 queue_index)
3121 {
3122 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3123
3124 return netif_tx_queue_stopped(txq);
3125 }
3126
3127 static inline bool netif_subqueue_stopped(const struct net_device *dev,
3128 struct sk_buff *skb)
3129 {
3130 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3131 }
3132
3133 /**
3134 * netif_wake_subqueue - allow sending packets on subqueue
3135 * @dev: network device
3136 * @queue_index: sub queue index
3137 *
3138 * Resume individual transmit queue of a device with multiple transmit queues.
3139 */
3140 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3141 {
3142 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3143
3144 netif_tx_wake_queue(txq);
3145 }
3146
3147 #ifdef CONFIG_XPS
3148 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3149 u16 index);
3150 #else
3151 static inline int netif_set_xps_queue(struct net_device *dev,
3152 const struct cpumask *mask,
3153 u16 index)
3154 {
3155 return 0;
3156 }
3157 #endif
3158
3159 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3160 unsigned int num_tx_queues);
3161
3162 /*
3163 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
3164 * as a distribution range limit for the returned value.
3165 */
3166 static inline u16 skb_tx_hash(const struct net_device *dev,
3167 struct sk_buff *skb)
3168 {
3169 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
3170 }
3171
3172 /**
3173 * netif_is_multiqueue - test if device has multiple transmit queues
3174 * @dev: network device
3175 *
3176 * Check if device has multiple transmit queues
3177 */
3178 static inline bool netif_is_multiqueue(const struct net_device *dev)
3179 {
3180 return dev->num_tx_queues > 1;
3181 }
3182
3183 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3184
3185 #ifdef CONFIG_SYSFS
3186 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3187 #else
3188 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3189 unsigned int rxq)
3190 {
3191 return 0;
3192 }
3193 #endif
3194
3195 #ifdef CONFIG_SYSFS
3196 static inline unsigned int get_netdev_rx_queue_index(
3197 struct netdev_rx_queue *queue)
3198 {
3199 struct net_device *dev = queue->dev;
3200 int index = queue - dev->_rx;
3201
3202 BUG_ON(index >= dev->num_rx_queues);
3203 return index;
3204 }
3205 #endif
3206
3207 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3208 int netif_get_num_default_rss_queues(void);
3209
3210 enum skb_free_reason {
3211 SKB_REASON_CONSUMED,
3212 SKB_REASON_DROPPED,
3213 };
3214
3215 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3216 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3217
3218 /*
3219 * It is not allowed to call kfree_skb() or consume_skb() from hardware
3220 * interrupt context or with hardware interrupts being disabled.
3221 * (in_irq() || irqs_disabled())
3222 *
3223 * We provide four helpers that can be used in following contexts :
3224 *
3225 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3226 * replacing kfree_skb(skb)
3227 *
3228 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3229 * Typically used in place of consume_skb(skb) in TX completion path
3230 *
3231 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3232 * replacing kfree_skb(skb)
3233 *
3234 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3235 * and consumed a packet. Used in place of consume_skb(skb)
3236 */
3237 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3238 {
3239 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3240 }
3241
3242 static inline void dev_consume_skb_irq(struct sk_buff *skb)
3243 {
3244 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3245 }
3246
3247 static inline void dev_kfree_skb_any(struct sk_buff *skb)
3248 {
3249 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3250 }
3251
3252 static inline void dev_consume_skb_any(struct sk_buff *skb)
3253 {
3254 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3255 }
3256
3257 int netif_rx(struct sk_buff *skb);
3258 int netif_rx_ni(struct sk_buff *skb);
3259 int netif_receive_skb(struct sk_buff *skb);
3260 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3261 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3262 struct sk_buff *napi_get_frags(struct napi_struct *napi);
3263 gro_result_t napi_gro_frags(struct napi_struct *napi);
3264 struct packet_offload *gro_find_receive_by_type(__be16 type);
3265 struct packet_offload *gro_find_complete_by_type(__be16 type);
3266
3267 static inline void napi_free_frags(struct napi_struct *napi)
3268 {
3269 kfree_skb(napi->skb);
3270 napi->skb = NULL;
3271 }
3272
3273 bool netdev_is_rx_handler_busy(struct net_device *dev);
3274 int netdev_rx_handler_register(struct net_device *dev,
3275 rx_handler_func_t *rx_handler,
3276 void *rx_handler_data);
3277 void netdev_rx_handler_unregister(struct net_device *dev);
3278
3279 bool dev_valid_name(const char *name);
3280 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
3281 int dev_ethtool(struct net *net, struct ifreq *);
3282 unsigned int dev_get_flags(const struct net_device *);
3283 int __dev_change_flags(struct net_device *, unsigned int flags);
3284 int dev_change_flags(struct net_device *, unsigned int);
3285 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3286 unsigned int gchanges);
3287 int dev_change_name(struct net_device *, const char *);
3288 int dev_set_alias(struct net_device *, const char *, size_t);
3289 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3290 int dev_set_mtu(struct net_device *, int);
3291 void dev_set_group(struct net_device *, int);
3292 int dev_set_mac_address(struct net_device *, struct sockaddr *);
3293 int dev_change_carrier(struct net_device *, bool new_carrier);
3294 int dev_get_phys_port_id(struct net_device *dev,
3295 struct netdev_phys_item_id *ppid);
3296 int dev_get_phys_port_name(struct net_device *dev,
3297 char *name, size_t len);
3298 int dev_change_proto_down(struct net_device *dev, bool proto_down);
3299 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3300 int fd, u32 flags);
3301 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
3302 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3303 struct netdev_queue *txq, int *ret);
3304 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3305 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3306 bool is_skb_forwardable(const struct net_device *dev,
3307 const struct sk_buff *skb);
3308
3309 static __always_inline int ____dev_forward_skb(struct net_device *dev,
3310 struct sk_buff *skb)
3311 {
3312 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3313 unlikely(!is_skb_forwardable(dev, skb))) {
3314 atomic_long_inc(&dev->rx_dropped);
3315 kfree_skb(skb);
3316 return NET_RX_DROP;
3317 }
3318
3319 skb_scrub_packet(skb, true);
3320 skb->priority = 0;
3321 return 0;
3322 }
3323
3324 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3325
3326 extern int netdev_budget;
3327 extern unsigned int netdev_budget_usecs;
3328
3329 /* Called by rtnetlink.c:rtnl_unlock() */
3330 void netdev_run_todo(void);
3331
3332 /**
3333 * dev_put - release reference to device
3334 * @dev: network device
3335 *
3336 * Release reference to device to allow it to be freed.
3337 */
3338 static inline void dev_put(struct net_device *dev)
3339 {
3340 this_cpu_dec(*dev->pcpu_refcnt);
3341 }
3342
3343 /**
3344 * dev_hold - get reference to device
3345 * @dev: network device
3346 *
3347 * Hold reference to device to keep it from being freed.
3348 */
3349 static inline void dev_hold(struct net_device *dev)
3350 {
3351 this_cpu_inc(*dev->pcpu_refcnt);
3352 }
3353
3354 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
3355 * and _off may be called from IRQ context, but it is caller
3356 * who is responsible for serialization of these calls.
3357 *
3358 * The name carrier is inappropriate, these functions should really be
3359 * called netif_lowerlayer_*() because they represent the state of any
3360 * kind of lower layer not just hardware media.
3361 */
3362
3363 void linkwatch_init_dev(struct net_device *dev);
3364 void linkwatch_fire_event(struct net_device *dev);
3365 void linkwatch_forget_dev(struct net_device *dev);
3366
3367 /**
3368 * netif_carrier_ok - test if carrier present
3369 * @dev: network device
3370 *
3371 * Check if carrier is present on device
3372 */
3373 static inline bool netif_carrier_ok(const struct net_device *dev)
3374 {
3375 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3376 }
3377
3378 unsigned long dev_trans_start(struct net_device *dev);
3379
3380 void __netdev_watchdog_up(struct net_device *dev);
3381
3382 void netif_carrier_on(struct net_device *dev);
3383
3384 void netif_carrier_off(struct net_device *dev);
3385
3386 /**
3387 * netif_dormant_on - mark device as dormant.
3388 * @dev: network device
3389 *
3390 * Mark device as dormant (as per RFC2863).
3391 *
3392 * The dormant state indicates that the relevant interface is not
3393 * actually in a condition to pass packets (i.e., it is not 'up') but is
3394 * in a "pending" state, waiting for some external event. For "on-
3395 * demand" interfaces, this new state identifies the situation where the
3396 * interface is waiting for events to place it in the up state.
3397 */
3398 static inline void netif_dormant_on(struct net_device *dev)
3399 {
3400 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3401 linkwatch_fire_event(dev);
3402 }
3403
3404 /**
3405 * netif_dormant_off - set device as not dormant.
3406 * @dev: network device
3407 *
3408 * Device is not in dormant state.
3409 */
3410 static inline void netif_dormant_off(struct net_device *dev)
3411 {
3412 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3413 linkwatch_fire_event(dev);
3414 }
3415
3416 /**
3417 * netif_dormant - test if device is dormant
3418 * @dev: network device
3419 *
3420 * Check if device is dormant.
3421 */
3422 static inline bool netif_dormant(const struct net_device *dev)
3423 {
3424 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3425 }
3426
3427
3428 /**
3429 * netif_oper_up - test if device is operational
3430 * @dev: network device
3431 *
3432 * Check if carrier is operational
3433 */
3434 static inline bool netif_oper_up(const struct net_device *dev)
3435 {
3436 return (dev->operstate == IF_OPER_UP ||
3437 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3438 }
3439
3440 /**
3441 * netif_device_present - is device available or removed
3442 * @dev: network device
3443 *
3444 * Check if device has not been removed from system.
3445 */
3446 static inline bool netif_device_present(struct net_device *dev)
3447 {
3448 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3449 }
3450
3451 void netif_device_detach(struct net_device *dev);
3452
3453 void netif_device_attach(struct net_device *dev);
3454
3455 /*
3456 * Network interface message level settings
3457 */
3458
3459 enum {
3460 NETIF_MSG_DRV = 0x0001,
3461 NETIF_MSG_PROBE = 0x0002,
3462 NETIF_MSG_LINK = 0x0004,
3463 NETIF_MSG_TIMER = 0x0008,
3464 NETIF_MSG_IFDOWN = 0x0010,
3465 NETIF_MSG_IFUP = 0x0020,
3466 NETIF_MSG_RX_ERR = 0x0040,
3467 NETIF_MSG_TX_ERR = 0x0080,
3468 NETIF_MSG_TX_QUEUED = 0x0100,
3469 NETIF_MSG_INTR = 0x0200,
3470 NETIF_MSG_TX_DONE = 0x0400,
3471 NETIF_MSG_RX_STATUS = 0x0800,
3472 NETIF_MSG_PKTDATA = 0x1000,
3473 NETIF_MSG_HW = 0x2000,
3474 NETIF_MSG_WOL = 0x4000,
3475 };
3476
3477 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3478 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3479 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3480 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3481 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3482 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3483 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3484 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3485 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3486 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3487 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3488 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3489 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3490 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3491 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3492
3493 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3494 {
3495 /* use default */
3496 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3497 return default_msg_enable_bits;
3498 if (debug_value == 0) /* no output */
3499 return 0;
3500 /* set low N bits */
3501 return (1 << debug_value) - 1;
3502 }
3503
3504 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3505 {
3506 spin_lock(&txq->_xmit_lock);
3507 txq->xmit_lock_owner = cpu;
3508 }
3509
3510 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
3511 {
3512 __acquire(&txq->_xmit_lock);
3513 return true;
3514 }
3515
3516 static inline void __netif_tx_release(struct netdev_queue *txq)
3517 {
3518 __release(&txq->_xmit_lock);
3519 }
3520
3521 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3522 {
3523 spin_lock_bh(&txq->_xmit_lock);
3524 txq->xmit_lock_owner = smp_processor_id();
3525 }
3526
3527 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
3528 {
3529 bool ok = spin_trylock(&txq->_xmit_lock);
3530 if (likely(ok))
3531 txq->xmit_lock_owner = smp_processor_id();
3532 return ok;
3533 }
3534
3535 static inline void __netif_tx_unlock(struct netdev_queue *txq)
3536 {
3537 txq->xmit_lock_owner = -1;
3538 spin_unlock(&txq->_xmit_lock);
3539 }
3540
3541 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3542 {
3543 txq->xmit_lock_owner = -1;
3544 spin_unlock_bh(&txq->_xmit_lock);
3545 }
3546
3547 static inline void txq_trans_update(struct netdev_queue *txq)
3548 {
3549 if (txq->xmit_lock_owner != -1)
3550 txq->trans_start = jiffies;
3551 }
3552
3553 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
3554 static inline void netif_trans_update(struct net_device *dev)
3555 {
3556 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3557
3558 if (txq->trans_start != jiffies)
3559 txq->trans_start = jiffies;
3560 }
3561
3562 /**
3563 * netif_tx_lock - grab network device transmit lock
3564 * @dev: network device
3565 *
3566 * Get network device transmit lock
3567 */
3568 static inline void netif_tx_lock(struct net_device *dev)
3569 {
3570 unsigned int i;
3571 int cpu;
3572
3573 spin_lock(&dev->tx_global_lock);
3574 cpu = smp_processor_id();
3575 for (i = 0; i < dev->num_tx_queues; i++) {
3576 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3577
3578 /* We are the only thread of execution doing a
3579 * freeze, but we have to grab the _xmit_lock in
3580 * order to synchronize with threads which are in
3581 * the ->hard_start_xmit() handler and already
3582 * checked the frozen bit.
3583 */
3584 __netif_tx_lock(txq, cpu);
3585 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3586 __netif_tx_unlock(txq);
3587 }
3588 }
3589
3590 static inline void netif_tx_lock_bh(struct net_device *dev)
3591 {
3592 local_bh_disable();
3593 netif_tx_lock(dev);
3594 }
3595
3596 static inline void netif_tx_unlock(struct net_device *dev)
3597 {
3598 unsigned int i;
3599
3600 for (i = 0; i < dev->num_tx_queues; i++) {
3601 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3602
3603 /* No need to grab the _xmit_lock here. If the
3604 * queue is not stopped for another reason, we
3605 * force a schedule.
3606 */
3607 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
3608 netif_schedule_queue(txq);
3609 }
3610 spin_unlock(&dev->tx_global_lock);
3611 }
3612
3613 static inline void netif_tx_unlock_bh(struct net_device *dev)
3614 {
3615 netif_tx_unlock(dev);
3616 local_bh_enable();
3617 }
3618
3619 #define HARD_TX_LOCK(dev, txq, cpu) { \
3620 if ((dev->features & NETIF_F_LLTX) == 0) { \
3621 __netif_tx_lock(txq, cpu); \
3622 } else { \
3623 __netif_tx_acquire(txq); \
3624 } \
3625 }
3626
3627 #define HARD_TX_TRYLOCK(dev, txq) \
3628 (((dev->features & NETIF_F_LLTX) == 0) ? \
3629 __netif_tx_trylock(txq) : \
3630 __netif_tx_acquire(txq))
3631
3632 #define HARD_TX_UNLOCK(dev, txq) { \
3633 if ((dev->features & NETIF_F_LLTX) == 0) { \
3634 __netif_tx_unlock(txq); \
3635 } else { \
3636 __netif_tx_release(txq); \
3637 } \
3638 }
3639
3640 static inline void netif_tx_disable(struct net_device *dev)
3641 {
3642 unsigned int i;
3643 int cpu;
3644
3645 local_bh_disable();
3646 cpu = smp_processor_id();
3647 for (i = 0; i < dev->num_tx_queues; i++) {
3648 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3649
3650 __netif_tx_lock(txq, cpu);
3651 netif_tx_stop_queue(txq);
3652 __netif_tx_unlock(txq);
3653 }
3654 local_bh_enable();
3655 }
3656
3657 static inline void netif_addr_lock(struct net_device *dev)
3658 {
3659 spin_lock(&dev->addr_list_lock);
3660 }
3661
3662 static inline void netif_addr_lock_nested(struct net_device *dev)
3663 {
3664 int subclass = SINGLE_DEPTH_NESTING;
3665
3666 if (dev->netdev_ops->ndo_get_lock_subclass)
3667 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3668
3669 spin_lock_nested(&dev->addr_list_lock, subclass);
3670 }
3671
3672 static inline void netif_addr_lock_bh(struct net_device *dev)
3673 {
3674 spin_lock_bh(&dev->addr_list_lock);
3675 }
3676
3677 static inline void netif_addr_unlock(struct net_device *dev)
3678 {
3679 spin_unlock(&dev->addr_list_lock);
3680 }
3681
3682 static inline void netif_addr_unlock_bh(struct net_device *dev)
3683 {
3684 spin_unlock_bh(&dev->addr_list_lock);
3685 }
3686
3687 /*
3688 * dev_addrs walker. Should be used only for read access. Call with
3689 * rcu_read_lock held.
3690 */
3691 #define for_each_dev_addr(dev, ha) \
3692 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
3693
3694 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
3695
3696 void ether_setup(struct net_device *dev);
3697
3698 /* Support for loadable net-drivers */
3699 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3700 unsigned char name_assign_type,
3701 void (*setup)(struct net_device *),
3702 unsigned int txqs, unsigned int rxqs);
3703 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3704 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3705
3706 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
3707 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
3708 count)
3709
3710 int register_netdev(struct net_device *dev);
3711 void unregister_netdev(struct net_device *dev);
3712
3713 /* General hardware address lists handling functions */
3714 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3715 struct netdev_hw_addr_list *from_list, int addr_len);
3716 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3717 struct netdev_hw_addr_list *from_list, int addr_len);
3718 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3719 struct net_device *dev,
3720 int (*sync)(struct net_device *, const unsigned char *),
3721 int (*unsync)(struct net_device *,
3722 const unsigned char *));
3723 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3724 struct net_device *dev,
3725 int (*unsync)(struct net_device *,
3726 const unsigned char *));
3727 void __hw_addr_init(struct netdev_hw_addr_list *list);
3728
3729 /* Functions used for device addresses handling */
3730 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3731 unsigned char addr_type);
3732 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3733 unsigned char addr_type);
3734 void dev_addr_flush(struct net_device *dev);
3735 int dev_addr_init(struct net_device *dev);
3736
3737 /* Functions used for unicast addresses handling */
3738 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3739 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3740 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3741 int dev_uc_sync(struct net_device *to, struct net_device *from);
3742 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3743 void dev_uc_unsync(struct net_device *to, struct net_device *from);
3744 void dev_uc_flush(struct net_device *dev);
3745 void dev_uc_init(struct net_device *dev);
3746
3747 /**
3748 * __dev_uc_sync - Synchonize device's unicast list
3749 * @dev: device to sync
3750 * @sync: function to call if address should be added
3751 * @unsync: function to call if address should be removed
3752 *
3753 * Add newly added addresses to the interface, and release
3754 * addresses that have been deleted.
3755 */
3756 static inline int __dev_uc_sync(struct net_device *dev,
3757 int (*sync)(struct net_device *,
3758 const unsigned char *),
3759 int (*unsync)(struct net_device *,
3760 const unsigned char *))
3761 {
3762 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3763 }
3764
3765 /**
3766 * __dev_uc_unsync - Remove synchronized addresses from device
3767 * @dev: device to sync
3768 * @unsync: function to call if address should be removed
3769 *
3770 * Remove all addresses that were added to the device by dev_uc_sync().
3771 */
3772 static inline void __dev_uc_unsync(struct net_device *dev,
3773 int (*unsync)(struct net_device *,
3774 const unsigned char *))
3775 {
3776 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3777 }
3778
3779 /* Functions used for multicast addresses handling */
3780 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3781 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3782 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3783 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3784 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3785 int dev_mc_sync(struct net_device *to, struct net_device *from);
3786 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3787 void dev_mc_unsync(struct net_device *to, struct net_device *from);
3788 void dev_mc_flush(struct net_device *dev);
3789 void dev_mc_init(struct net_device *dev);
3790
3791 /**
3792 * __dev_mc_sync - Synchonize device's multicast list
3793 * @dev: device to sync
3794 * @sync: function to call if address should be added
3795 * @unsync: function to call if address should be removed
3796 *
3797 * Add newly added addresses to the interface, and release
3798 * addresses that have been deleted.
3799 */
3800 static inline int __dev_mc_sync(struct net_device *dev,
3801 int (*sync)(struct net_device *,
3802 const unsigned char *),
3803 int (*unsync)(struct net_device *,
3804 const unsigned char *))
3805 {
3806 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3807 }
3808
3809 /**
3810 * __dev_mc_unsync - Remove synchronized addresses from device
3811 * @dev: device to sync
3812 * @unsync: function to call if address should be removed
3813 *
3814 * Remove all addresses that were added to the device by dev_mc_sync().
3815 */
3816 static inline void __dev_mc_unsync(struct net_device *dev,
3817 int (*unsync)(struct net_device *,
3818 const unsigned char *))
3819 {
3820 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3821 }
3822
3823 /* Functions used for secondary unicast and multicast support */
3824 void dev_set_rx_mode(struct net_device *dev);
3825 void __dev_set_rx_mode(struct net_device *dev);
3826 int dev_set_promiscuity(struct net_device *dev, int inc);
3827 int dev_set_allmulti(struct net_device *dev, int inc);
3828 void netdev_state_change(struct net_device *dev);
3829 void netdev_notify_peers(struct net_device *dev);
3830 void netdev_features_change(struct net_device *dev);
3831 /* Load a device via the kmod */
3832 void dev_load(struct net *net, const char *name);
3833 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3834 struct rtnl_link_stats64 *storage);
3835 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3836 const struct net_device_stats *netdev_stats);
3837
3838 extern int netdev_max_backlog;
3839 extern int netdev_tstamp_prequeue;
3840 extern int weight_p;
3841 extern int dev_weight_rx_bias;
3842 extern int dev_weight_tx_bias;
3843 extern int dev_rx_weight;
3844 extern int dev_tx_weight;
3845
3846 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3847 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3848 struct list_head **iter);
3849 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3850 struct list_head **iter);
3851
3852 /* iterate through upper list, must be called under RCU read lock */
3853 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3854 for (iter = &(dev)->adj_list.upper, \
3855 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3856 updev; \
3857 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3858
3859 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
3860 int (*fn)(struct net_device *upper_dev,
3861 void *data),
3862 void *data);
3863
3864 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
3865 struct net_device *upper_dev);
3866
3867 void *netdev_lower_get_next_private(struct net_device *dev,
3868 struct list_head **iter);
3869 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3870 struct list_head **iter);
3871
3872 #define netdev_for_each_lower_private(dev, priv, iter) \
3873 for (iter = (dev)->adj_list.lower.next, \
3874 priv = netdev_lower_get_next_private(dev, &(iter)); \
3875 priv; \
3876 priv = netdev_lower_get_next_private(dev, &(iter)))
3877
3878 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3879 for (iter = &(dev)->adj_list.lower, \
3880 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3881 priv; \
3882 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3883
3884 void *netdev_lower_get_next(struct net_device *dev,
3885 struct list_head **iter);
3886
3887 #define netdev_for_each_lower_dev(dev, ldev, iter) \
3888 for (iter = (dev)->adj_list.lower.next, \
3889 ldev = netdev_lower_get_next(dev, &(iter)); \
3890 ldev; \
3891 ldev = netdev_lower_get_next(dev, &(iter)))
3892
3893 struct net_device *netdev_all_lower_get_next(struct net_device *dev,
3894 struct list_head **iter);
3895 struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
3896 struct list_head **iter);
3897
3898 int netdev_walk_all_lower_dev(struct net_device *dev,
3899 int (*fn)(struct net_device *lower_dev,
3900 void *data),
3901 void *data);
3902 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
3903 int (*fn)(struct net_device *lower_dev,
3904 void *data),
3905 void *data);
3906
3907 void *netdev_adjacent_get_private(struct list_head *adj_list);
3908 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3909 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3910 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3911 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3912 int netdev_master_upper_dev_link(struct net_device *dev,
3913 struct net_device *upper_dev,
3914 void *upper_priv, void *upper_info);
3915 void netdev_upper_dev_unlink(struct net_device *dev,
3916 struct net_device *upper_dev);
3917 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3918 void *netdev_lower_dev_get_private(struct net_device *dev,
3919 struct net_device *lower_dev);
3920 void netdev_lower_state_changed(struct net_device *lower_dev,
3921 void *lower_state_info);
3922
3923 /* RSS keys are 40 or 52 bytes long */
3924 #define NETDEV_RSS_KEY_LEN 52
3925 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
3926 void netdev_rss_key_fill(void *buffer, size_t len);
3927
3928 int dev_get_nest_level(struct net_device *dev);
3929 int skb_checksum_help(struct sk_buff *skb);
3930 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3931 netdev_features_t features, bool tx_path);
3932 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3933 netdev_features_t features);
3934
3935 struct netdev_bonding_info {
3936 ifslave slave;
3937 ifbond master;
3938 };
3939
3940 struct netdev_notifier_bonding_info {
3941 struct netdev_notifier_info info; /* must be first */
3942 struct netdev_bonding_info bonding_info;
3943 };
3944
3945 void netdev_bonding_info_change(struct net_device *dev,
3946 struct netdev_bonding_info *bonding_info);
3947
3948 static inline
3949 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3950 {
3951 return __skb_gso_segment(skb, features, true);
3952 }
3953 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3954
3955 static inline bool can_checksum_protocol(netdev_features_t features,
3956 __be16 protocol)
3957 {
3958 if (protocol == htons(ETH_P_FCOE))
3959 return !!(features & NETIF_F_FCOE_CRC);
3960
3961 /* Assume this is an IP checksum (not SCTP CRC) */
3962
3963 if (features & NETIF_F_HW_CSUM) {
3964 /* Can checksum everything */
3965 return true;
3966 }
3967
3968 switch (protocol) {
3969 case htons(ETH_P_IP):
3970 return !!(features & NETIF_F_IP_CSUM);
3971 case htons(ETH_P_IPV6):
3972 return !!(features & NETIF_F_IPV6_CSUM);
3973 default:
3974 return false;
3975 }
3976 }
3977
3978 #ifdef CONFIG_BUG
3979 void netdev_rx_csum_fault(struct net_device *dev);
3980 #else
3981 static inline void netdev_rx_csum_fault(struct net_device *dev)
3982 {
3983 }
3984 #endif
3985 /* rx skb timestamps */
3986 void net_enable_timestamp(void);
3987 void net_disable_timestamp(void);
3988
3989 #ifdef CONFIG_PROC_FS
3990 int __init dev_proc_init(void);
3991 #else
3992 #define dev_proc_init() 0
3993 #endif
3994
3995 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3996 struct sk_buff *skb, struct net_device *dev,
3997 bool more)
3998 {
3999 skb->xmit_more = more ? 1 : 0;
4000 return ops->ndo_start_xmit(skb, dev);
4001 }
4002
4003 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4004 struct netdev_queue *txq, bool more)
4005 {
4006 const struct net_device_ops *ops = dev->netdev_ops;
4007 int rc;
4008
4009 rc = __netdev_start_xmit(ops, skb, dev, more);
4010 if (rc == NETDEV_TX_OK)
4011 txq_trans_update(txq);
4012
4013 return rc;
4014 }
4015
4016 int netdev_class_create_file_ns(struct class_attribute *class_attr,
4017 const void *ns);
4018 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
4019 const void *ns);
4020
4021 static inline int netdev_class_create_file(struct class_attribute *class_attr)
4022 {
4023 return netdev_class_create_file_ns(class_attr, NULL);
4024 }
4025
4026 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
4027 {
4028 netdev_class_remove_file_ns(class_attr, NULL);
4029 }
4030
4031 extern struct kobj_ns_type_operations net_ns_type_operations;
4032
4033 const char *netdev_drivername(const struct net_device *dev);
4034
4035 void linkwatch_run_queue(void);
4036
4037 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4038 netdev_features_t f2)
4039 {
4040 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4041 if (f1 & NETIF_F_HW_CSUM)
4042 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4043 else
4044 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4045 }
4046
4047 return f1 & f2;
4048 }
4049
4050 static inline netdev_features_t netdev_get_wanted_features(
4051 struct net_device *dev)
4052 {
4053 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4054 }
4055 netdev_features_t netdev_increment_features(netdev_features_t all,
4056 netdev_features_t one, netdev_features_t mask);
4057
4058 /* Allow TSO being used on stacked device :
4059 * Performing the GSO segmentation before last device
4060 * is a performance improvement.
4061 */
4062 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4063 netdev_features_t mask)
4064 {
4065 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4066 }
4067
4068 int __netdev_update_features(struct net_device *dev);
4069 void netdev_update_features(struct net_device *dev);
4070 void netdev_change_features(struct net_device *dev);
4071
4072 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4073 struct net_device *dev);
4074
4075 netdev_features_t passthru_features_check(struct sk_buff *skb,
4076 struct net_device *dev,
4077 netdev_features_t features);
4078 netdev_features_t netif_skb_features(struct sk_buff *skb);
4079
4080 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4081 {
4082 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4083
4084 /* check flags correspondence */
4085 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4086 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
4087 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4088 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4089 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4090 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4091 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4092 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4093 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4094 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4095 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4096 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4097 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4098 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4099 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4100 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4101 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
4102
4103 return (features & feature) == feature;
4104 }
4105
4106 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4107 {
4108 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4109 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4110 }
4111
4112 static inline bool netif_needs_gso(struct sk_buff *skb,
4113 netdev_features_t features)
4114 {
4115 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4116 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4117 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4118 }
4119
4120 static inline void netif_set_gso_max_size(struct net_device *dev,
4121 unsigned int size)
4122 {
4123 dev->gso_max_size = size;
4124 }
4125
4126 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4127 int pulled_hlen, u16 mac_offset,
4128 int mac_len)
4129 {
4130 skb->protocol = protocol;
4131 skb->encapsulation = 1;
4132 skb_push(skb, pulled_hlen);
4133 skb_reset_transport_header(skb);
4134 skb->mac_header = mac_offset;
4135 skb->network_header = skb->mac_header + mac_len;
4136 skb->mac_len = mac_len;
4137 }
4138
4139 static inline bool netif_is_macsec(const struct net_device *dev)
4140 {
4141 return dev->priv_flags & IFF_MACSEC;
4142 }
4143
4144 static inline bool netif_is_macvlan(const struct net_device *dev)
4145 {
4146 return dev->priv_flags & IFF_MACVLAN;
4147 }
4148
4149 static inline bool netif_is_macvlan_port(const struct net_device *dev)
4150 {
4151 return dev->priv_flags & IFF_MACVLAN_PORT;
4152 }
4153
4154 static inline bool netif_is_ipvlan(const struct net_device *dev)
4155 {
4156 return dev->priv_flags & IFF_IPVLAN_SLAVE;
4157 }
4158
4159 static inline bool netif_is_ipvlan_port(const struct net_device *dev)
4160 {
4161 return dev->priv_flags & IFF_IPVLAN_MASTER;
4162 }
4163
4164 static inline bool netif_is_bond_master(const struct net_device *dev)
4165 {
4166 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4167 }
4168
4169 static inline bool netif_is_bond_slave(const struct net_device *dev)
4170 {
4171 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4172 }
4173
4174 static inline bool netif_supports_nofcs(struct net_device *dev)
4175 {
4176 return dev->priv_flags & IFF_SUPP_NOFCS;
4177 }
4178
4179 static inline bool netif_is_l3_master(const struct net_device *dev)
4180 {
4181 return dev->priv_flags & IFF_L3MDEV_MASTER;
4182 }
4183
4184 static inline bool netif_is_l3_slave(const struct net_device *dev)
4185 {
4186 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4187 }
4188
4189 static inline bool netif_is_bridge_master(const struct net_device *dev)
4190 {
4191 return dev->priv_flags & IFF_EBRIDGE;
4192 }
4193
4194 static inline bool netif_is_bridge_port(const struct net_device *dev)
4195 {
4196 return dev->priv_flags & IFF_BRIDGE_PORT;
4197 }
4198
4199 static inline bool netif_is_ovs_master(const struct net_device *dev)
4200 {
4201 return dev->priv_flags & IFF_OPENVSWITCH;
4202 }
4203
4204 static inline bool netif_is_ovs_port(const struct net_device *dev)
4205 {
4206 return dev->priv_flags & IFF_OVS_DATAPATH;
4207 }
4208
4209 static inline bool netif_is_team_master(const struct net_device *dev)
4210 {
4211 return dev->priv_flags & IFF_TEAM;
4212 }
4213
4214 static inline bool netif_is_team_port(const struct net_device *dev)
4215 {
4216 return dev->priv_flags & IFF_TEAM_PORT;
4217 }
4218
4219 static inline bool netif_is_lag_master(const struct net_device *dev)
4220 {
4221 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4222 }
4223
4224 static inline bool netif_is_lag_port(const struct net_device *dev)
4225 {
4226 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4227 }
4228
4229 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4230 {
4231 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4232 }
4233
4234 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4235 static inline void netif_keep_dst(struct net_device *dev)
4236 {
4237 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4238 }
4239
4240 /* return true if dev can't cope with mtu frames that need vlan tag insertion */
4241 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4242 {
4243 /* TODO: reserve and use an additional IFF bit, if we get more users */
4244 return dev->priv_flags & IFF_MACSEC;
4245 }
4246
4247 extern struct pernet_operations __net_initdata loopback_net_ops;
4248
4249 /* Logging, debugging and troubleshooting/diagnostic helpers. */
4250
4251 /* netdev_printk helpers, similar to dev_printk */
4252
4253 static inline const char *netdev_name(const struct net_device *dev)
4254 {
4255 if (!dev->name[0] || strchr(dev->name, '%'))
4256 return "(unnamed net_device)";
4257 return dev->name;
4258 }
4259
4260 static inline const char *netdev_reg_state(const struct net_device *dev)
4261 {
4262 switch (dev->reg_state) {
4263 case NETREG_UNINITIALIZED: return " (uninitialized)";
4264 case NETREG_REGISTERED: return "";
4265 case NETREG_UNREGISTERING: return " (unregistering)";
4266 case NETREG_UNREGISTERED: return " (unregistered)";
4267 case NETREG_RELEASED: return " (released)";
4268 case NETREG_DUMMY: return " (dummy)";
4269 }
4270
4271 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
4272 return " (unknown)";
4273 }
4274
4275 __printf(3, 4)
4276 void netdev_printk(const char *level, const struct net_device *dev,
4277 const char *format, ...);
4278 __printf(2, 3)
4279 void netdev_emerg(const struct net_device *dev, const char *format, ...);
4280 __printf(2, 3)
4281 void netdev_alert(const struct net_device *dev, const char *format, ...);
4282 __printf(2, 3)
4283 void netdev_crit(const struct net_device *dev, const char *format, ...);
4284 __printf(2, 3)
4285 void netdev_err(const struct net_device *dev, const char *format, ...);
4286 __printf(2, 3)
4287 void netdev_warn(const struct net_device *dev, const char *format, ...);
4288 __printf(2, 3)
4289 void netdev_notice(const struct net_device *dev, const char *format, ...);
4290 __printf(2, 3)
4291 void netdev_info(const struct net_device *dev, const char *format, ...);
4292
4293 #define MODULE_ALIAS_NETDEV(device) \
4294 MODULE_ALIAS("netdev-" device)
4295
4296 #if defined(CONFIG_DYNAMIC_DEBUG)
4297 #define netdev_dbg(__dev, format, args...) \
4298 do { \
4299 dynamic_netdev_dbg(__dev, format, ##args); \
4300 } while (0)
4301 #elif defined(DEBUG)
4302 #define netdev_dbg(__dev, format, args...) \
4303 netdev_printk(KERN_DEBUG, __dev, format, ##args)
4304 #else
4305 #define netdev_dbg(__dev, format, args...) \
4306 ({ \
4307 if (0) \
4308 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
4309 })
4310 #endif
4311
4312 #if defined(VERBOSE_DEBUG)
4313 #define netdev_vdbg netdev_dbg
4314 #else
4315
4316 #define netdev_vdbg(dev, format, args...) \
4317 ({ \
4318 if (0) \
4319 netdev_printk(KERN_DEBUG, dev, format, ##args); \
4320 0; \
4321 })
4322 #endif
4323
4324 /*
4325 * netdev_WARN() acts like dev_printk(), but with the key difference
4326 * of using a WARN/WARN_ON to get the message out, including the
4327 * file/line information and a backtrace.
4328 */
4329 #define netdev_WARN(dev, format, args...) \
4330 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
4331 netdev_reg_state(dev), ##args)
4332
4333 /* netif printk helpers, similar to netdev_printk */
4334
4335 #define netif_printk(priv, type, level, dev, fmt, args...) \
4336 do { \
4337 if (netif_msg_##type(priv)) \
4338 netdev_printk(level, (dev), fmt, ##args); \
4339 } while (0)
4340
4341 #define netif_level(level, priv, type, dev, fmt, args...) \
4342 do { \
4343 if (netif_msg_##type(priv)) \
4344 netdev_##level(dev, fmt, ##args); \
4345 } while (0)
4346
4347 #define netif_emerg(priv, type, dev, fmt, args...) \
4348 netif_level(emerg, priv, type, dev, fmt, ##args)
4349 #define netif_alert(priv, type, dev, fmt, args...) \
4350 netif_level(alert, priv, type, dev, fmt, ##args)
4351 #define netif_crit(priv, type, dev, fmt, args...) \
4352 netif_level(crit, priv, type, dev, fmt, ##args)
4353 #define netif_err(priv, type, dev, fmt, args...) \
4354 netif_level(err, priv, type, dev, fmt, ##args)
4355 #define netif_warn(priv, type, dev, fmt, args...) \
4356 netif_level(warn, priv, type, dev, fmt, ##args)
4357 #define netif_notice(priv, type, dev, fmt, args...) \
4358 netif_level(notice, priv, type, dev, fmt, ##args)
4359 #define netif_info(priv, type, dev, fmt, args...) \
4360 netif_level(info, priv, type, dev, fmt, ##args)
4361
4362 #if defined(CONFIG_DYNAMIC_DEBUG)
4363 #define netif_dbg(priv, type, netdev, format, args...) \
4364 do { \
4365 if (netif_msg_##type(priv)) \
4366 dynamic_netdev_dbg(netdev, format, ##args); \
4367 } while (0)
4368 #elif defined(DEBUG)
4369 #define netif_dbg(priv, type, dev, format, args...) \
4370 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
4371 #else
4372 #define netif_dbg(priv, type, dev, format, args...) \
4373 ({ \
4374 if (0) \
4375 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4376 0; \
4377 })
4378 #endif
4379
4380 /* if @cond then downgrade to debug, else print at @level */
4381 #define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
4382 do { \
4383 if (cond) \
4384 netif_dbg(priv, type, netdev, fmt, ##args); \
4385 else \
4386 netif_ ## level(priv, type, netdev, fmt, ##args); \
4387 } while (0)
4388
4389 #if defined(VERBOSE_DEBUG)
4390 #define netif_vdbg netif_dbg
4391 #else
4392 #define netif_vdbg(priv, type, dev, format, args...) \
4393 ({ \
4394 if (0) \
4395 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4396 0; \
4397 })
4398 #endif
4399
4400 /*
4401 * The list of packet types we will receive (as opposed to discard)
4402 * and the routines to invoke.
4403 *
4404 * Why 16. Because with 16 the only overlap we get on a hash of the
4405 * low nibble of the protocol value is RARP/SNAP/X.25.
4406 *
4407 * NOTE: That is no longer true with the addition of VLAN tags. Not
4408 * sure which should go first, but I bet it won't make much
4409 * difference if we are running VLANs. The good news is that
4410 * this protocol won't be in the list unless compiled in, so
4411 * the average user (w/out VLANs) will not be adversely affected.
4412 * --BLG
4413 *
4414 * 0800 IP
4415 * 8100 802.1Q VLAN
4416 * 0001 802.3
4417 * 0002 AX.25
4418 * 0004 802.2
4419 * 8035 RARP
4420 * 0005 SNAP
4421 * 0805 X.25
4422 * 0806 ARP
4423 * 8137 IPX
4424 * 0009 Localtalk
4425 * 86DD IPv6
4426 */
4427 #define PTYPE_HASH_SIZE (16)
4428 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
4429
4430 #endif /* _LINUX_NETDEVICE_H */