/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall, <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */
#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */
/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
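
/*
 * Usage sketch (illustrative, not part of the original header): a protocol
 * output path that queues a packet and folds congestion notification into
 * its return value.  my_proto_output() is a hypothetical caller.
 *
 *	static int my_proto_output(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		return net_xmit_eval(rc);	(NET_XMIT_CN is reported as 0)
 *	}
 */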
#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK	0		/* driver took care of packet */
#define NETDEV_TX_BUSY	1		/* driver tx path was busy */
#define NETDEV_TX_LOCKED -1		/* driver tx lock was already taken */
/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER	32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER	96
#else
#define LL_MAX_HEADER	48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
struct net_device_subqueue
{
	/* Give a control state for each queue. This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long	state;
};
/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
/* Media selection options. */
#include <linux/cache.h>
#include <linux/skbuff.h>
struct netif_rx_stats
{
	unsigned	time_squeeze;
	unsigned	cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_users;
	u8			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers
struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry			     */
	atomic_t	hh_refcnt;	/* number of users		     */
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 * NOTE:  For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 *
 *	dev->hard_header_len ? (dev->hard_header_len +
 *				(HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	(((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
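
/*
 * Usage sketch (illustrative, not part of the original header): reserving
 * link-layer headroom when building an outgoing packet.  "payload_len" is
 * an assumed caller-supplied length.
 *
 *	skb = alloc_skb(payload_len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */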
/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_XOFF=0,
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_QDISC_RUNNING,
};
/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct list_head	dev_list;
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
};

extern void FASTCALL(__napi_schedule(struct napi_struct *n));
/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep_interruptible(1);
}
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
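
/*
 * Usage sketch (illustrative, not part of the original header): the usual
 * NAPI pattern for a hypothetical driver "mydrv".  The interrupt handler
 * masks RX interrupts and schedules the poll; the poll routine processes up
 * to "budget" packets and re-enables interrupts when done.  mydrv_priv,
 * mydrv_rx() and the irq helpers are assumptions for illustration.
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *data)
 *	{
 *		struct mydrv_priv *mp = data;
 *
 *		if (napi_schedule_prep(&mp->napi)) {
 *			mydrv_disable_rx_irq(mp);	(hypothetical helper)
 *			__napi_schedule(&mp->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *mp = container_of(napi, struct mydrv_priv, napi);
 *		int done = mydrv_rx(mp, budget);	(hypothetical RX routine)
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			mydrv_enable_rx_irq(mp);	(hypothetical helper)
 *		}
 *		return done;
 *	}
 */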
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */
	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
	struct net_device	*next_sched;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;
#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;
	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/
	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short          dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/

	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	/* Protocol specific pointers */

	void 			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr */
/*
 * Cache line mostly used on queue transmit path (qdisc)
 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* Partially transmitted GSO packet. */
	struct sk_buff		*gso_skb;

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
	struct Qdisc		*qdisc_ingress;
/*
 * One part is mostly used on xmit path (device)
 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of processor entered to hard_start_xmit or -1,
	   if nobody entered there.
	 */
	int			xmit_lock_owner;
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;
/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;
	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;
	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines. */
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
	int			(*hard_header) (struct sk_buff *skb,
						struct net_device *dev,
						unsigned short type,
						void *daddr,
						void *saddr,
						unsigned len);
	int			(*rebuild_header)(struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_HEADER_CACHE
	int			(*hard_header_cache)(struct neighbour *neigh,
						     struct hh_cache *hh);
	void			(*header_cache_update)(struct hh_cache *hh,
						       struct net_device *dev,
						       unsigned char *haddr);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);
#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*hard_header_parse)(struct sk_buff *skb,
						     unsigned char *haddr);
	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif
	/* Network namespace this network device is inside */
	struct net		*nd_net;

	struct net_bridge_port	*br_port;
	struct macvlan_port	*macvlan_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* The TX queue control structures */
	unsigned int		egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
#define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}
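
/*
 * Usage sketch (illustrative): the private area is allocated together with
 * the net_device by alloc_netdev(), so drivers reach their state via
 * netdev_priv() rather than through dev->priv directly.  "struct mydrv_priv"
 * is a hypothetical driver structure.
 *
 *	struct mydrv_priv *mp = netdev_priv(dev);
 */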
#define SET_MODULE_OWNER(dev) do { } while (0)
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern struct net_device		loopback_dev;		/* The loopback */
extern rwlock_t				dev_base_lock;		/* Device list lock */
#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

#define next_net_device(d)						\
({									\
	struct net_device *dev = d;					\
	struct list_head *lh;						\
	struct net *net;						\
									\
	net = dev->nd_net;						\
	lh = dev->dev_list.next;					\
	lh == &net->dev_base_head ? NULL : net_device_entry(lh);	\
})

#define first_net_device(N) \
({								\
	struct net *NET = (N);					\
	list_empty(&NET->dev_base_head) ? NULL :		\
		net_device_entry(NET->dev_base_head.next);	\
})
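
/*
 * Usage sketch (illustrative): walking every device in a namespace under
 * dev_base_lock.  Assumes read-only iteration; "net" is the namespace
 * pointer available to the caller (e.g. &init_net).
 *
 *	struct net_device *d;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, d)
 *		printk(KERN_DEBUG "%s\n", d->name);
 *	read_unlock(&dev_base_lock);
 */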
extern int 		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device    *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int 		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, void *v);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct net_device	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);
#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}
/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}
/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}
/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}
/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
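
/*
 * Usage sketch (illustrative): the usual transmit flow-control pattern for
 * a hypothetical driver.  hard_start_xmit stops the queue when its TX ring
 * fills, and the TX-completion interrupt wakes it once space is available.
 * mydrv_tx_ring_full() and mydrv_tx_ring_has_room() are assumed helpers.
 *
 *	static int mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		... queue skb to hardware ...
 *		if (mydrv_tx_ring_full(dev))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	and on TX completion:
 *
 *	if (netif_queue_stopped(dev) && mydrv_tx_ring_has_room(dev))
 *		netif_wake_queue(dev);
 */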
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */
/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}
/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}
/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(dev);
#endif
}
/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues.
 * Always false if CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
#else
	return 0;
#endif
}
/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);
#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}
/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
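
/*
 * Usage sketch (illustrative): code that stashes a net_device pointer must
 * hold a reference for as long as the pointer is kept; unregistration will
 * not finish until the refcount drops.
 *
 *	dev_hold(dev);		(pointer stored somewhere)
 *	...
 *	dev_put(dev);		(pointer dropped)
 */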
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and netif_carrier_off may be called from IRQ context, but it is the
 * caller who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);
/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 *	Mark device as dormant (as per RFC2863).
 *
 *	The dormant state indicates that the relevant interface is not
 *	actually in a condition to pass packets (i.e., it is not 'up') but is
 *	in a "pending" state, waiting for some external event.  For "on-
 *	demand" interfaces, this new state identifies the situation where the
 *	interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 *	Mark device as no longer dormant.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 *	Check if the device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev) {
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 *	Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return netif_running(dev) && napi_schedule_prep(napi);
}
/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}
/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}
/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}
/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
}
/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}
/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}
static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}
static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}
static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);
/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
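
/*
 * Usage sketch (illustrative): the common allocate/register/unregister
 * sequence for an Ethernet driver with private state.  mydrv_priv and the
 * mydrv_* handlers are hypothetical; error handling is abbreviated.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev(sizeof(struct mydrv_priv), "eth%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->open = mydrv_open;
 *	dev->stop = mydrv_stop;
 *	dev->hard_start_xmit = mydrv_start_xmit;
 *	if (register_netdev(dev)) {
 *		free_netdev(dev);
 *		return -ENODEV;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */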
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int 		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);
#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);
static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}
#endif	/* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */