/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */
#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */
/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
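/*
 * Usage sketch (illustrative only, not part of this header): a caller that
 * propagates a transmit status but treats congestion notification as success.
 *
 *	int rc = dev_queue_xmit(skb);
 *	return net_xmit_eval(rc);	// NET_XMIT_CN still means "queued"
 *
 * net_xmit_errno() is the converse helper for callers that want an errno
 * (-ENOBUFS) for anything other than NET_XMIT_CN.
 */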
#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK		0	/* driver took care of packet */
#define NETDEV_TX_BUSY		1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */
#ifdef __KERNEL__
/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif  /*  __KERNEL__  */
struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long	state;
};

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */
struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* received pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error	*/
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
/* Media selection options. */

#include <linux/cache.h>
#include <linux/skbuff.h>
struct netif_rx_stats
{
	unsigned	time_squeeze;
	unsigned	cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers
struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry			     */
	atomic_t	hh_refcnt;	/* number of users		     */
/*
 * We want hh_output, hh_len, hh_lock and hh_data be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 *  NOTE:  For VLANs, this will be the
					 *  encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.       */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
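/*
 * Usage sketch (illustrative only): allocating an output skb with enough
 * headroom for the link-layer header.  The surrounding error handling and
 * the use of dev->mtu as payload size are assumptions, not part of this
 * header.
 *
 *	struct sk_buff *skb = alloc_skb(dev->mtu + LL_ALLOCATED_SPACE(dev),
 *					GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *
 * Reserving LL_RESERVED_SPACE() up front lets dev_hard_header() push the
 * hardware header without reallocating the skb.
 */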
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_XOFF = 0,
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_QDISC_RUNNING,
};
/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};
enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
};
extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
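/*
 * Usage sketch (illustrative only; "struct foo_priv", foo_open() and
 * foo_stop() are hypothetical driver code, not part of this header):
 * pairing napi_enable()/napi_disable() with the device open/stop paths.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		napi_enable(&priv->napi);
 *		netif_start_queue(dev);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		netif_stop_queue(dev);
 *		napi_disable(&priv->napi);	// waits for any running poll
 *		return 0;
 *	}
 */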
struct netdev_queue {
	struct net_device	*dev;
	struct sk_buff		*gso_skb;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	struct netdev_queue	*next_sched;
};
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device
{
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);
	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
	/* Interface index. Unique device identifier	*/
	int			ifindex;

	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;
	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/

	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;
	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data	*/
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

	struct netdev_queue	rx_queue;
	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
/*
 * One part is mostly used on xmit path (device)
 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of processor entered to hard_start_xmit or -1,
	   if nobody entered there.
	 */
	int			xmit_lock_owner;
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;
/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;
	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;
	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines.	*/
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif
	/* Network namespace this network device is inside */
	struct net		*nd_net;

	/* mid-layer private */

	struct net_bridge_port	*br_port;

	struct macvlan_port	*macvlan_port;

	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

	/* The TX queue control structures */
	unsigned int		egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
#define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return dev->nd_net;
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
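/*
 * Usage sketch (illustrative only; "struct foo_priv", foo_poll(),
 * foo_clean_rx() and foo_enable_irqs() are hypothetical driver code,
 * not part of this header):
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_clean_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			netif_rx_complete(priv->netdev, napi);
 *			foo_enable_irqs(priv);
 *		}
 *		return work_done;
 *	}
 *
 *	// in the probe routine, before register_netdev():
 *	netif_napi_add(netdev, &priv->napi, foo_poll, 64);
 */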
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}
extern int 		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int 		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
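/*
 * Usage sketch (illustrative only): building the link-layer header on an
 * outgoing skb after LL_RESERVED_SPACE() headroom has been reserved.  The
 * destination address "dest_hw" and the error label are hypothetical.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw, dev->dev_addr,
 *			    skb->len) < 0)
 *		goto drop;
 *
 * dev_parse_header() performs the reverse operation on a received frame,
 * copying the source hardware address into "haddr" and returning its length.
 */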
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct netdev_queue	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);
#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct netdev_queue *txq);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(txq);
}

static inline void netif_schedule(struct net_device *dev)
{
	netif_schedule_queue(&dev->tx_queue);
}
/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}
/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(&dev->tx_queue);
}
/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}
/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}
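/*
 * Usage sketch (illustrative only; foo_hard_start_xmit(), foo_queue_on_ring(),
 * foo_tx_ring_full() and foo_tx_ring_has_room() are hypothetical driver code):
 * classic transmit flow control built on the helpers above.
 *
 *	static int foo_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_queue_on_ring(priv, skb);
 *		if (foo_tx_ring_full(priv))
 *			netif_stop_queue(dev);	// core stops calling us
 *		return NETDEV_TX_OK;
 *	}
 *
 *	// later, in the TX completion interrupt:
 *	if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */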
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF,
		  &dev->egress_subqueue[queue_index].state);
#endif
}
/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF,
		&dev->egress_subqueue[queue_index].state);
#endif
}
/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(&dev->tx_queue);
#endif
}
/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues.
 * Always returns false if CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
#else
	return 0;
#endif
}
/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);
#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}
/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
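/*
 * Usage sketch (illustrative only; "eth0" and the surrounding code are
 * hypothetical): keeping a device pinned while it is being used.
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0"); // takes a ref
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);		// drop the reference when done
 *	}
 *
 * dev_hold() takes an additional reference on a device pointer that is
 * already known to be valid, e.g. inside a netdevice notifier callback.
 */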
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);
/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev) {
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
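/*
 * Usage sketch (illustrative only; "debug" would typically be a module
 * parameter and "priv->msg_enable" a driver-private field, both assumptions):
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 *
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", dev->name);
 */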
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().  */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}
/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
					struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}
/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *	@cpu: cpu number of lock owner
 *
 * Get network device transmit lock
 */
static inline void __netif_tx_lock(struct net_device *dev, int cpu)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
	__netif_tx_lock(dev, smp_processor_id());
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(dev, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_unlock(dev);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
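/*
 * Usage sketch (illustrative only; "struct foo_priv" and the PCI device
 * "pdev" are hypothetical): allocating an Ethernet-style device with private
 * data, then registering it.
 *
 *	struct net_device *dev;
 *	struct foo_priv *priv;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);		// the sizeof_priv area
 *	SET_NETDEV_DEV(dev, &pdev->dev);	// link to the bus device
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */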
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int 		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int 		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_bonding_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);
static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
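/*
 * Usage sketch (illustrative only): how a transmit path can fall back to
 * software segmentation when the device cannot handle a GSO skb directly.
 * Error handling and the actual per-segment transmit loop are elided.
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (!IS_ERR(segs) && segs != NULL)
 *			... transmit each skb on the segs list ...
 *	}
 */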
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}
#endif /* __KERNEL__ */

#endif	/* _LINUX_DEV_H */