/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
	spin_lock(&sd->input_pkt_queue.lock);
}

static inline void rps_unlock(struct softnet_data *sd)
{
	spin_unlock(&sd->input_pkt_queue.lock);
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	is first on the list, it cannot sense that the packet is cloned
 *	and should be copied-on-write; it will change the packet and
 *	subsequent readers will get a broken packet.
 */
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new packet
 *	type (until the next packet is received).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
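
/*
 * Illustrative sketch (not part of the original file): how a module might
 * register a packet tap with dev_add_pack() and later remove it with
 * dev_remove_pack().  All "example_" names below are hypothetical.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Inspect the packet, then release our clone. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype __read_mostly = {
	.type = htons(ETH_P_ALL),	/* tap: see every protocol */
	.dev  = NULL,			/* on all devices */
	.func = example_rcv,
};

/* dev_add_pack(&example_ptype); ... dev_remove_pack(&example_ptype); */
#endif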
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new offload
 *	handlers (until the next packet is received).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds a new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves the settings configured at boot time for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
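
/*
 * Worked example of the boot-time syntax parsed above (values are purely
 * illustrative): a kernel command line containing "netdev=9,0x300,0,0,eth0"
 * records IRQ 9 and I/O base 0x300 for the device that will later probe as
 * "eth0"; the trailing string after the numbers is the device name.
 */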
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/
/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
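
/*
 * Illustrative sketch (not part of the original file): typical use of
 * dev_get_by_name() from process context, paired with the mandatory
 * dev_put().  "example_use_device" is a hypothetical helper.
 */
#if 0
static int example_use_device(struct net *net, const char *ifname)
{
	struct net_device *dev;

	dev = dev_get_by_name(net, ifname);
	if (!dev)
		return -ENODEV;

	pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);

	dev_put(dev);		/* release the reference taken above */
	return 0;
}
#endif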
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
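
/*
 * For illustration: names such as "eth0", "br-lan" or "veth%d" pass the
 * checks above, while "", ".", "..", "a/b", "a:b" and names of IFNAMSIZ or
 * more characters are rejected.
 */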
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
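
/*
 * Illustrative sketch (not part of the original file): a driver asking for
 * the next free unit of a hypothetical "foo%d" name template before
 * registering its device.
 */
#if 0
static int example_pick_name(struct net_device *dev)
{
	int unit;

	unit = dev_alloc_name(dev, "foo%d");	/* becomes "foo0", "foo1", ... */
	if (unit < 0)
		return unit;			/* -EINVAL, -ENFILE, ... */

	pr_info("assigned %s (unit %d)\n", dev->name, unit);
	return 0;
}
#endif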
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; a format string such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
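
/*
 * Illustrative sketch (not part of the original file): bringing a device up
 * from process context; dev_open() and dev_close() must be called with the
 * RTNL held.  "example_bring_up" is a hypothetical helper.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* nop if the device is already IFF_UP */
	rtnl_unlock();
	return err;
}
#endif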
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}
static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}
int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
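
/*
 * Illustrative sketch (not part of the original file): a minimal notifier
 * that logs NETDEV_UP events.  All "example_" names are hypothetical.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier); */
#endif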
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)		\
			__net_timestamp(SKB);			\
	}
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->protocol = eth_type_trans(skb, dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this, verify that the tc mapping remains valid and,
 * if not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if TC0
 * is invalid nothing can be done, so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}
static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
#endif /* CONFIG_XPS */
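
/*
 * Illustrative sketch (not part of the original file): a driver pinning its
 * hypothetical TX queue 0 to CPUs 0 and 1 via netif_set_xps_queue().
 */
#if 0
static void example_pin_queue0(struct net_device *dev)
{
	cpumask_var_t cpus;

	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
		return;
	cpumask_set_cpu(0, cpus);
	cpumask_set_cpu(1, cpus);
	netif_set_xps_queue(dev, cpus, 0);	/* queue index 0 */
	free_cpumask_var(cpus);
}
#endif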
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
			netif_reset_xps_queues_gt(dev, txq);
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
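
/*
 * Illustrative sketch (not part of the original file): a driver resizing to
 * "n" active TX and RX queues, e.g. after an ethtool channel change.
 * "example_resize" is a hypothetical helper.
 */
#if 0
static int example_resize(struct net_device *dev, unsigned int n)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, n);
}
#endif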
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(txq->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_wake_subqueue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
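/*
 * Illustrative sketch (not part of this file): TX completion handlers may
 * run in hard IRQ context, so drivers free transmitted skbs through the
 * dev_kfree_skb_any()/dev_consume_skb_any() wrappers, which end up here.
 * The "example_" names are hypothetical.
 *
 *	static void example_clean_tx_buffer(struct sk_buff *skb, bool ok)
 *	{
 *		if (ok)
 *			dev_consume_skb_any(skb);	(sent successfully)
 *		else
 *			dev_kfree_skb_any(skb);		(counted as a drop)
 *	}
 */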
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
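/*
 * Illustrative sketch (not part of this file): drivers typically pair these
 * helpers in their power-management callbacks so the stack stops queueing
 * packets while the hardware is down.  The "example_" names are hypothetical.
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		example_hw_stop(dev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		example_hw_start(dev);
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */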
/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
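/*
 * Illustrative sketch (not part of this file): a driver whose hardware
 * cannot checksum a particular packet can fall back to this helper from
 * its ndo_start_xmit() before handing the buffer to the NIC.  The
 * "example_" names are hypothetical.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb)) {
 *		if (skb_checksum_help(skb))
 *			goto drop;
 *	}
 */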
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(const struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
		features &= ~NETIF_F_GSO_MASK;

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (!(features & NETIF_F_ALL_CSUM) &&
			    skb_checksum_help(skb))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	return NULL;
}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC___STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);

#define RECURSION_LIMIT 10
/**
 * dev_loopback_xmit - loop back @skb
 * @net: network namespace this loopback is happening in
 * @sk:  sk needed to be a netfilter okfn
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[skb->sender_cpu - 1]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	if (skb->sender_cpu == 0)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *	I notice this method can also return errors from the queue disciplines,
 *	including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *	be positive.
 *
 *	Regardless of the return value, the skb is consumed, so it is currently
 *	difficult to retry a send to this method.  (You can bump the ref count
 *	before sending to hold a reference for retry if you are careful.)
 *
 *	When calling this method, interrupts MUST be enabled.  This is because
 *	the BH enable code must have IRQs enabled so that it will not deadlock.
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	/* If device/qdisc don't need skb->dst, release it right now while
	 * its hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

#ifdef CONFIG_NET_SWITCHDEV
	/* Don't forward if offload device already forwarded */
	if (skb->offload_fwd_mark &&
	    skb->offload_fwd_mark == dev->offload_fwd_mark) {
		consume_skb(skb);
		rc = NET_XMIT_SUCCESS;
		goto out;
	}
#endif

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
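/*
 * Illustrative sketch (not part of this file): a protocol or tunnel layer
 * that has built a complete frame hands it to the device layer like this.
 * As the comment above notes, the return value reports queueing only, not
 * delivery, so callers normally just count failures.  The ETH_P_EXAMPLE
 * ethertype and example_ counter are hypothetical.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_EXAMPLE);
 *	if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
 *		example_count_tx_error();
 */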
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
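/*
 * Illustrative sketch (not part of this file): a driver implementing
 * ndo_rx_flow_steer() keeps its own table of installed filter IDs and
 * periodically asks the stack whether each one may be removed.  The
 * "example_" names are hypothetical.
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct example_filter *f = &priv->filters[i];
 *
 *		if (f->installed &&
 *		    rps_may_expire_flow(priv->dev, f->rxq_index,
 *					f->flow_id, i)) {
 *			example_hw_remove_filter(priv, f);
 *			f->installed = false;
 *		}
 *	}
 */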
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
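/*
 * Illustrative sketch (not part of this file): a simple (non-NAPI) or
 * virtual device pushes received frames into the stack with netif_rx()
 * from its interrupt handler or transmit path.
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	if (netif_rx(skb) == NET_RX_DROP)
 *		dev->stats.rx_dropped++;
 *	else
 *		dev->stats.rx_packets++;
 */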
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_atomic();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_atomic();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
	(defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
	struct tcf_result cl_res;

	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
	if (!cl)
		return skb;
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tc_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		kfree_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
		__skb_push(skb, skb->mac_len);
		skb_do_redirect(skb);
		return NULL;
	default:
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return skb;
}
/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
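/*
 * Illustrative sketch (not part of this file): stacking drivers (bridge,
 * bonding, team, ...) claim a port's receive path by registering an
 * rx_handler under RTNL; the handler then decides what happens to each skb.
 * The "example_" names are hypothetical.
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (!example_frame_is_ours(skb))
 *			return RX_HANDLER_PASS;
 *		example_port_rx(skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	err = netdev_rx_handler_register(port_dev, example_handle_frame,
 *					 port_priv);
 */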
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		return nf_hook_ingress(skb);
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_key_false(&ingress_needed)) {
		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
ncls:
#endif
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}
/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}
static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->udp_mark = 0;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
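/*
 * Illustrative sketch (not part of this file): a NAPI driver's poll
 * callback feeds received buffers through GRO and completes when it has
 * used less than its budget.  The "example_" names are hypothetical.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = 0;
 *		struct sk_buff *skb;
 *
 *		while (done < budget && (skb = example_rx_next(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */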
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	napi->skb = skb;
}
struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				smp_call_function_single_async(remsd->cpu,
							       &remsd->csd);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

	/* Check if we have pending ipi, its better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = weight_p;
	local_irq_disable();
	while (1) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			local_irq_enable();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			rps_unlock(sd);

			break;
		}

		skb_queue_splice_tail_init(&sd->input_pkt_queue,
					   &sd->process_queue);
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));

	list_del_init(&n->poll_list);
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);
void napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case its running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (likely(list_empty(&n->poll_list))) {
		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
	} else {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		__napi_complete(n);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we dont take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
EXPORT_SYMBOL_GPL(napi_by_id);

void napi_hash_add(struct napi_struct *napi)
{
	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {

		spin_lock(&napi_hash_lock);

		/* 0 is not a valid id, we also skip an id that is taken
		 * we expect both events to be extremely rare
		 */
		napi->napi_id = 0;
		while (!napi->napi_id) {
			napi->napi_id = ++napi_gen_id;
			if (napi_by_id(napi->napi_id))
				napi->napi_id = 0;
		}

		hlist_add_head_rcu(&napi->napi_hash_node,
			&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

		spin_unlock(&napi_hash_lock);
	}
}
EXPORT_SYMBOL_GPL(napi_hash_add);

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
		hlist_del_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_del);
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);
	if (napi->gro_list)
		napi_schedule(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);

void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

void netif_napi_del(struct napi_struct *napi)
{
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
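/*
 * Illustrative sketch (not part of this file): a driver registers its NAPI
 * context once at probe time and only schedules it from the interrupt
 * handler; a poll routine like the example_poll() sketch earlier then runs
 * in softirq context.  The "example_" names are hypothetical.
 *
 *	netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		example_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */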
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				return;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
}
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->all_adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
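/* Illustrative sketch (hypothetical caller, not part of dev.c): using the
 * adjacency helpers above under RTNL, e.g. to find out whether @dev is
 * enslaved to a particular bond/bridge-style master. Only the function name
 * is made up; the helpers are the real API.
 */
static bool my_dev_is_enslaved_to(struct net_device *dev,
				  struct net_device *candidate_master)
{
	ASSERT_RTNL();		/* both helpers require the RTNL lock */

	if (!netdev_has_upper_dev(dev, candidate_master))
		return false;

	return netdev_master_upper_dev_get(dev) == candidate_master;
}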
4938 void *netdev_adjacent_get_private(struct list_head
*adj_list
)
4940 struct netdev_adjacent
*adj
;
4942 adj
= list_entry(adj_list
, struct netdev_adjacent
, list
);
4944 return adj
->private;
4946 EXPORT_SYMBOL(netdev_adjacent_get_private
);
4949 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4951 * @iter: list_head ** of the current position
4953 * Gets the next device from the dev's upper list, starting from iter
4954 * position. The caller must hold RCU read lock.
4956 struct net_device
*netdev_upper_get_next_dev_rcu(struct net_device
*dev
,
4957 struct list_head
**iter
)
4959 struct netdev_adjacent
*upper
;
4961 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4963 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
4965 if (&upper
->list
== &dev
->adj_list
.upper
)
4968 *iter
= &upper
->list
;
4972 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu
);
4975 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4977 * @iter: list_head ** of the current position
4979 * Gets the next device from the dev's upper list, starting from iter
4980 * position. The caller must hold RCU read lock.
4982 struct net_device
*netdev_all_upper_get_next_dev_rcu(struct net_device
*dev
,
4983 struct list_head
**iter
)
4985 struct netdev_adjacent
*upper
;
4987 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4989 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
4991 if (&upper
->list
== &dev
->all_adj_list
.upper
)
4994 *iter
= &upper
->list
;
4998 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu
);
5001 * netdev_lower_get_next_private - Get the next ->private from the
5002 * lower neighbour list
5004 * @iter: list_head ** of the current position
5006 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5007 * list, starting from iter position. The caller must hold either hold the
5008 * RTNL lock or its own locking that guarantees that the neighbour lower
5009 * list will remain unchanged.
5011 void *netdev_lower_get_next_private(struct net_device
*dev
,
5012 struct list_head
**iter
)
5014 struct netdev_adjacent
*lower
;
5016 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
5018 if (&lower
->list
== &dev
->adj_list
.lower
)
5021 *iter
= lower
->list
.next
;
5023 return lower
->private;
5025 EXPORT_SYMBOL(netdev_lower_get_next_private
);
5028 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5029 * lower neighbour list, RCU
5032 * @iter: list_head ** of the current position
5034 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5035 * list, starting from iter position. The caller must hold RCU read lock.
5037 void *netdev_lower_get_next_private_rcu(struct net_device
*dev
,
5038 struct list_head
**iter
)
5040 struct netdev_adjacent
*lower
;
5042 WARN_ON_ONCE(!rcu_read_lock_held());
5044 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
5046 if (&lower
->list
== &dev
->adj_list
.lower
)
5049 *iter
= &lower
->list
;
5051 return lower
->private;
5053 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu
);
5056 * netdev_lower_get_next - Get the next device from the lower neighbour
5059 * @iter: list_head ** of the current position
5061 * Gets the next netdev_adjacent from the dev's lower neighbour
5062 * list, starting from iter position. The caller must hold RTNL lock or
5063 * its own locking that guarantees that the neighbour lower
5064 * list will remain unchanged.
5066 void *netdev_lower_get_next(struct net_device
*dev
, struct list_head
**iter
)
5068 struct netdev_adjacent
*lower
;
5070 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
5072 if (&lower
->list
== &dev
->adj_list
.lower
)
5075 *iter
= &lower
->list
;
5079 EXPORT_SYMBOL(netdev_lower_get_next
);
5082 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5083 * lower neighbour list, RCU
5087 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5088 * list. The caller must hold RCU read lock.
5090 void *netdev_lower_get_first_private_rcu(struct net_device
*dev
)
5092 struct netdev_adjacent
*lower
;
5094 lower
= list_first_or_null_rcu(&dev
->adj_list
.lower
,
5095 struct netdev_adjacent
, list
);
5097 return lower
->private;
5100 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu
);
/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5121 static int netdev_adjacent_sysfs_add(struct net_device
*dev
,
5122 struct net_device
*adj_dev
,
5123 struct list_head
*dev_list
)
5125 char linkname
[IFNAMSIZ
+7];
5126 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
5127 "upper_%s" : "lower_%s", adj_dev
->name
);
5128 return sysfs_create_link(&(dev
->dev
.kobj
), &(adj_dev
->dev
.kobj
),
5131 static void netdev_adjacent_sysfs_del(struct net_device
*dev
,
5133 struct list_head
*dev_list
)
5135 char linkname
[IFNAMSIZ
+7];
5136 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
5137 "upper_%s" : "lower_%s", name
);
5138 sysfs_remove_link(&(dev
->dev
.kobj
), linkname
);
5141 static inline bool netdev_adjacent_is_neigh_list(struct net_device
*dev
,
5142 struct net_device
*adj_dev
,
5143 struct list_head
*dev_list
)
5145 return (dev_list
== &dev
->adj_list
.upper
||
5146 dev_list
== &dev
->adj_list
.lower
) &&
5147 net_eq(dev_net(dev
), dev_net(adj_dev
));
5150 static int __netdev_adjacent_dev_insert(struct net_device
*dev
,
5151 struct net_device
*adj_dev
,
5152 struct list_head
*dev_list
,
5153 void *private, bool master
)
5155 struct netdev_adjacent
*adj
;
5158 adj
= __netdev_find_adj(adj_dev
, dev_list
);
5165 adj
= kmalloc(sizeof(*adj
), GFP_KERNEL
);
5170 adj
->master
= master
;
5172 adj
->private = private;
5175 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5176 adj_dev
->name
, dev
->name
, adj_dev
->name
);
5178 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
)) {
5179 ret
= netdev_adjacent_sysfs_add(dev
, adj_dev
, dev_list
);
5184 /* Ensure that master link is always the first item in list. */
5186 ret
= sysfs_create_link(&(dev
->dev
.kobj
),
5187 &(adj_dev
->dev
.kobj
), "master");
5189 goto remove_symlinks
;
5191 list_add_rcu(&adj
->list
, dev_list
);
5193 list_add_tail_rcu(&adj
->list
, dev_list
);
5199 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
5200 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
5208 static void __netdev_adjacent_dev_remove(struct net_device
*dev
,
5209 struct net_device
*adj_dev
,
5210 struct list_head
*dev_list
)
5212 struct netdev_adjacent
*adj
;
5214 adj
= __netdev_find_adj(adj_dev
, dev_list
);
5217 pr_err("tried to remove device %s from %s\n",
5218 dev
->name
, adj_dev
->name
);
5222 if (adj
->ref_nr
> 1) {
5223 pr_debug("%s to %s ref_nr-- = %d\n", dev
->name
, adj_dev
->name
,
5230 sysfs_remove_link(&(dev
->dev
.kobj
), "master");
5232 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
5233 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
5235 list_del_rcu(&adj
->list
);
5236 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5237 adj_dev
->name
, dev
->name
, adj_dev
->name
);
5239 kfree_rcu(adj
, rcu
);
5242 static int __netdev_adjacent_dev_link_lists(struct net_device
*dev
,
5243 struct net_device
*upper_dev
,
5244 struct list_head
*up_list
,
5245 struct list_head
*down_list
,
5246 void *private, bool master
)
5250 ret
= __netdev_adjacent_dev_insert(dev
, upper_dev
, up_list
, private,
5255 ret
= __netdev_adjacent_dev_insert(upper_dev
, dev
, down_list
, private,
5258 __netdev_adjacent_dev_remove(dev
, upper_dev
, up_list
);
5265 static int __netdev_adjacent_dev_link(struct net_device
*dev
,
5266 struct net_device
*upper_dev
)
5268 return __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
5269 &dev
->all_adj_list
.upper
,
5270 &upper_dev
->all_adj_list
.lower
,
5274 static void __netdev_adjacent_dev_unlink_lists(struct net_device
*dev
,
5275 struct net_device
*upper_dev
,
5276 struct list_head
*up_list
,
5277 struct list_head
*down_list
)
5279 __netdev_adjacent_dev_remove(dev
, upper_dev
, up_list
);
5280 __netdev_adjacent_dev_remove(upper_dev
, dev
, down_list
);
5283 static void __netdev_adjacent_dev_unlink(struct net_device
*dev
,
5284 struct net_device
*upper_dev
)
5286 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
,
5287 &dev
->all_adj_list
.upper
,
5288 &upper_dev
->all_adj_list
.lower
);
5291 static int __netdev_adjacent_dev_link_neighbour(struct net_device
*dev
,
5292 struct net_device
*upper_dev
,
5293 void *private, bool master
)
5295 int ret
= __netdev_adjacent_dev_link(dev
, upper_dev
);
5300 ret
= __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
5301 &dev
->adj_list
.upper
,
5302 &upper_dev
->adj_list
.lower
,
5305 __netdev_adjacent_dev_unlink(dev
, upper_dev
);
5312 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device
*dev
,
5313 struct net_device
*upper_dev
)
5315 __netdev_adjacent_dev_unlink(dev
, upper_dev
);
5316 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
,
5317 &dev
->adj_list
.upper
,
5318 &upper_dev
->adj_list
.lower
);
5321 static int __netdev_upper_dev_link(struct net_device
*dev
,
5322 struct net_device
*upper_dev
, bool master
,
5325 struct netdev_notifier_changeupper_info changeupper_info
;
5326 struct netdev_adjacent
*i
, *j
, *to_i
, *to_j
;
5331 if (dev
== upper_dev
)
5334 /* To prevent loops, check if dev is not upper device to upper_dev. */
5335 if (__netdev_find_adj(dev
, &upper_dev
->all_adj_list
.upper
))
5338 if (__netdev_find_adj(upper_dev
, &dev
->adj_list
.upper
))
5341 if (master
&& netdev_master_upper_dev_get(dev
))
5344 changeupper_info
.upper_dev
= upper_dev
;
5345 changeupper_info
.master
= master
;
5346 changeupper_info
.linking
= true;
5348 ret
= __netdev_adjacent_dev_link_neighbour(dev
, upper_dev
, private,
5353 /* Now that we linked these devs, make all the upper_dev's
5354 * all_adj_list.upper visible to every dev's all_adj_list.lower an
5355 * versa, and don't forget the devices itself. All of these
5356 * links are non-neighbours.
5358 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5359 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
) {
5360 pr_debug("Interlinking %s with %s, non-neighbour\n",
5361 i
->dev
->name
, j
->dev
->name
);
5362 ret
= __netdev_adjacent_dev_link(i
->dev
, j
->dev
);
5368 /* add dev to every upper_dev's upper device */
5369 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
) {
5370 pr_debug("linking %s's upper device %s with %s\n",
5371 upper_dev
->name
, i
->dev
->name
, dev
->name
);
5372 ret
= __netdev_adjacent_dev_link(dev
, i
->dev
);
5374 goto rollback_upper_mesh
;
5377 /* add upper_dev to every dev's lower device */
5378 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5379 pr_debug("linking %s's lower device %s with %s\n", dev
->name
,
5380 i
->dev
->name
, upper_dev
->name
);
5381 ret
= __netdev_adjacent_dev_link(i
->dev
, upper_dev
);
5383 goto rollback_lower_mesh
;
5386 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
, dev
,
5387 &changeupper_info
.info
);
5390 rollback_lower_mesh
:
5392 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5395 __netdev_adjacent_dev_unlink(i
->dev
, upper_dev
);
5400 rollback_upper_mesh
:
5402 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
) {
5405 __netdev_adjacent_dev_unlink(dev
, i
->dev
);
5413 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5414 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
) {
5415 if (i
== to_i
&& j
== to_j
)
5417 __netdev_adjacent_dev_unlink(i
->dev
, j
->dev
);
5423 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, private);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5472 * netdev_upper_dev_unlink - Removes a link to upper device
5474 * @upper_dev: new upper device
5476 * Removes a link to device which is upper to this one. The caller must hold
5479 void netdev_upper_dev_unlink(struct net_device
*dev
,
5480 struct net_device
*upper_dev
)
5482 struct netdev_notifier_changeupper_info changeupper_info
;
5483 struct netdev_adjacent
*i
, *j
;
5486 changeupper_info
.upper_dev
= upper_dev
;
5487 changeupper_info
.master
= netdev_master_upper_dev_get(dev
) == upper_dev
;
5488 changeupper_info
.linking
= false;
5490 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
5492 /* Here is the tricky part. We must remove all dev's lower
5493 * devices from all upper_dev's upper devices and vice
5494 * versa, to maintain the graph relationship.
5496 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
)
5497 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
)
5498 __netdev_adjacent_dev_unlink(i
->dev
, j
->dev
);
5500 /* remove also the devices itself from lower/upper device
5503 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
)
5504 __netdev_adjacent_dev_unlink(i
->dev
, upper_dev
);
5506 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
)
5507 __netdev_adjacent_dev_unlink(dev
, i
->dev
);
5509 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
, dev
,
5510 &changeupper_info
.info
);
5512 EXPORT_SYMBOL(netdev_upper_dev_unlink
);
5515 * netdev_bonding_info_change - Dispatch event about slave change
5517 * @bonding_info: info to dispatch
5519 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5520 * The caller must hold the RTNL lock.
5522 void netdev_bonding_info_change(struct net_device
*dev
,
5523 struct netdev_bonding_info
*bonding_info
)
5525 struct netdev_notifier_bonding_info info
;
5527 memcpy(&info
.bonding_info
, bonding_info
,
5528 sizeof(struct netdev_bonding_info
));
5529 call_netdevice_notifiers_info(NETDEV_BONDING_INFO
, dev
,
5532 EXPORT_SYMBOL(netdev_bonding_info_change
);
5534 static void netdev_adjacent_add_links(struct net_device
*dev
)
5536 struct netdev_adjacent
*iter
;
5538 struct net
*net
= dev_net(dev
);
5540 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5541 if (!net_eq(net
,dev_net(iter
->dev
)))
5543 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5544 &iter
->dev
->adj_list
.lower
);
5545 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
5546 &dev
->adj_list
.upper
);
5549 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5550 if (!net_eq(net
,dev_net(iter
->dev
)))
5552 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5553 &iter
->dev
->adj_list
.upper
);
5554 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
5555 &dev
->adj_list
.lower
);
5559 static void netdev_adjacent_del_links(struct net_device
*dev
)
5561 struct netdev_adjacent
*iter
;
5563 struct net
*net
= dev_net(dev
);
5565 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5566 if (!net_eq(net
,dev_net(iter
->dev
)))
5568 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
5569 &iter
->dev
->adj_list
.lower
);
5570 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
5571 &dev
->adj_list
.upper
);
5574 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5575 if (!net_eq(net
,dev_net(iter
->dev
)))
5577 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
5578 &iter
->dev
->adj_list
.upper
);
5579 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
5580 &dev
->adj_list
.lower
);
5584 void netdev_adjacent_rename_links(struct net_device
*dev
, char *oldname
)
5586 struct netdev_adjacent
*iter
;
5588 struct net
*net
= dev_net(dev
);
5590 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5591 if (!net_eq(net
,dev_net(iter
->dev
)))
5593 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
5594 &iter
->dev
->adj_list
.lower
);
5595 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5596 &iter
->dev
->adj_list
.lower
);
5599 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5600 if (!net_eq(net
,dev_net(iter
->dev
)))
5602 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
5603 &iter
->dev
->adj_list
.upper
);
5604 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5605 &iter
->dev
->adj_list
.upper
);
5609 void *netdev_lower_dev_get_private(struct net_device
*dev
,
5610 struct net_device
*lower_dev
)
5612 struct netdev_adjacent
*lower
;
5616 lower
= __netdev_find_adj(lower_dev
, &dev
->adj_list
.lower
);
5620 return lower
->private;
5622 EXPORT_SYMBOL(netdev_lower_dev_get_private
);
5625 int dev_get_nest_level(struct net_device
*dev
,
5626 bool (*type_check
)(struct net_device
*dev
))
5628 struct net_device
*lower
= NULL
;
5629 struct list_head
*iter
;
5635 netdev_for_each_lower_dev(dev
, lower
, iter
) {
5636 nest
= dev_get_nest_level(lower
, type_check
);
5637 if (max_nest
< nest
)
5641 if (type_check(dev
))
5646 EXPORT_SYMBOL(dev_get_nest_level
);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}
/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
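/* Illustrative sketch (hypothetical feature code, not part of dev.c): taking a
 * promiscuity reference while a packet tap is active and dropping it on
 * teardown, relying on the counting behaviour documented above. Must run under
 * RTNL, like dev_set_promiscuity() itself; the my_tap_* names are made up.
 */
static int my_tap_attach(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();
	err = dev_set_promiscuity(dev, 1);	/* +1 while the tap is active */
	if (err)
		return err;
	/* ... install the tap ... */
	return 0;
}

static void my_tap_detach(struct net_device *dev)
{
	ASSERT_RTNL();
	/* ... remove the tap ... */
	dev_set_promiscuity(dev, -1);		/* drop our reference */
}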
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all interfaces. Once it hits zero the device reverts back to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are enabled.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
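/* Illustrative sketch (hypothetical driver code, not part of dev.c): a minimal
 * ->ndo_set_rx_mode() as invoked by __dev_set_rx_mode() above. It runs with the
 * address list lock held (taken in dev_set_rx_mode()) and walks the kernel
 * maintained unicast/multicast lists; the my_hw_* filter helpers are made up.
 */
static void my_ndo_set_rx_mode(struct net_device *dev)
{
	struct my_adapter *ad = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		my_hw_accept_all(ad);			/* no filtering at all */
		return;
	}

	my_hw_clear_filters(ad);
	if (dev->flags & IFF_ALLMULTI)
		my_hw_accept_all_multicast(ad);
	else
		netdev_for_each_mc_addr(ha, dev)	/* multicast filters */
			my_hw_add_filter(ad, ha->addr);
	netdev_for_each_uc_addr(ha, dev)		/* secondary unicasts */
		my_hw_add_filter(ad, ha->addr);
}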
/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
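/* Illustrative sketch (userspace program, not part of dev.c): the flag word
 * assembled by dev_get_flags() above is what SIOCGIFFLAGS reports, so user
 * space can test IFF_UP/IFF_RUNNING exactly as the kernel set them. "eth0" is
 * just an example interface name. (IFF_LOWER_UP/IFF_DORMANT do not fit in the
 * 16-bit ifr_flags and are reported through rtnetlink instead.)
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (fd < 0 || ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {
		perror("SIOCGIFFLAGS");
		return 1;
	}
	printf("up=%d running=%d\n",
	       !!(ifr.ifr_flags & IFF_UP), !!(ifr.ifr_flags & IFF_RUNNING));
	close(fd);
	return 0;
}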
int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */
	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */
	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface. We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */
	ret = 0;
	if ((old_flags ^ flags) & IFF_UP)
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC, when
	 * IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = changes;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on device based state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
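/* Illustrative sketch (not part of dev.c): how other kernel code typically
 * toggles IFF_UP through the helpers above, taking RTNL itself. The wrapper
 * name is made up; dev_get_flags()/dev_change_flags() are the real API.
 */
static int my_force_iface_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}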
static int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
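/* Illustrative sketch (not part of dev.c): enabling jumbo frames from other
 * kernel code by calling dev_set_mtu() above under RTNL. The wrapper name and
 * the 9000 byte value are just examples; drivers may still reject the MTU from
 * their ->ndo_change_mtu().
 */
static int my_enable_jumbo_frames(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}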
/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
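/* Illustrative sketch (not part of dev.c): building the sockaddr expected by
 * dev_set_mac_address() above. Note that sa_family must carry the device type
 * (e.g. ARPHRD_ETHER), not a socket address family, or the -EINVAL check above
 * fires. The wrapper name is made up; callers hold RTNL.
 */
static int my_set_mac(struct net_device *dev, const u8 *new_addr)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;	/* ARPHRD_* value checked above */
	memcpy(sa.sa_data, new_addr, dev->addr_len);

	ASSERT_RTNL();
	return dev_set_mac_address(dev, &sa);
}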
/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port id struct
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_name)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_name(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);
/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
6159 static void rollback_registered_many(struct list_head
*head
)
6161 struct net_device
*dev
, *tmp
;
6162 LIST_HEAD(close_head
);
6164 BUG_ON(dev_boot_phase
);
6167 list_for_each_entry_safe(dev
, tmp
, head
, unreg_list
) {
6168 /* Some devices call without registering
6169 * for initialization unwind. Remove those
6170 * devices and proceed with the remaining.
6172 if (dev
->reg_state
== NETREG_UNINITIALIZED
) {
6173 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6177 list_del(&dev
->unreg_list
);
6180 dev
->dismantle
= true;
6181 BUG_ON(dev
->reg_state
!= NETREG_REGISTERED
);
6184 /* If device is running, close it first. */
6185 list_for_each_entry(dev
, head
, unreg_list
)
6186 list_add_tail(&dev
->close_list
, &close_head
);
6187 dev_close_many(&close_head
, true);
6189 list_for_each_entry(dev
, head
, unreg_list
) {
6190 /* And unlink it from device chain. */
6191 unlist_netdevice(dev
);
6193 dev
->reg_state
= NETREG_UNREGISTERING
;
6194 on_each_cpu(flush_backlog
, dev
, 1);
6199 list_for_each_entry(dev
, head
, unreg_list
) {
6200 struct sk_buff
*skb
= NULL
;
6202 /* Shutdown queueing discipline. */
6206 /* Notify protocols, that we are about to destroy
6207 this device. They should clean all the things.
6209 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
6211 if (!dev
->rtnl_link_ops
||
6212 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
6213 skb
= rtmsg_ifinfo_build_skb(RTM_DELLINK
, dev
, ~0U,
6217 * Flush the unicast and multicast chains
6222 if (dev
->netdev_ops
->ndo_uninit
)
6223 dev
->netdev_ops
->ndo_uninit(dev
);
6226 rtmsg_ifinfo_send(skb
, dev
, GFP_KERNEL
);
6228 /* Notifier chain MUST detach us all upper devices. */
6229 WARN_ON(netdev_has_any_upper_dev(dev
));
6231 /* Remove entries from kobject tree */
6232 netdev_unregister_kobject(dev
);
6234 /* Remove XPS queueing entries */
6235 netif_reset_xps_queues_gt(dev
, 0);
6241 list_for_each_entry(dev
, head
, unreg_list
)
6245 static void rollback_registered(struct net_device
*dev
)
6249 list_add(&dev
->unreg_list
, &single
);
6250 rollback_registered_many(&single
);
6254 static netdev_features_t
netdev_fix_features(struct net_device
*dev
,
6255 netdev_features_t features
)
6257 /* Fix illegal checksum combinations */
6258 if ((features
& NETIF_F_HW_CSUM
) &&
6259 (features
& (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
6260 netdev_warn(dev
, "mixed HW and IP checksum settings.\n");
6261 features
&= ~(NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
);
6264 /* TSO requires that SG is present as well. */
6265 if ((features
& NETIF_F_ALL_TSO
) && !(features
& NETIF_F_SG
)) {
6266 netdev_dbg(dev
, "Dropping TSO features since no SG feature.\n");
6267 features
&= ~NETIF_F_ALL_TSO
;
6270 if ((features
& NETIF_F_TSO
) && !(features
& NETIF_F_HW_CSUM
) &&
6271 !(features
& NETIF_F_IP_CSUM
)) {
6272 netdev_dbg(dev
, "Dropping TSO features since no CSUM feature.\n");
6273 features
&= ~NETIF_F_TSO
;
6274 features
&= ~NETIF_F_TSO_ECN
;
6277 if ((features
& NETIF_F_TSO6
) && !(features
& NETIF_F_HW_CSUM
) &&
6278 !(features
& NETIF_F_IPV6_CSUM
)) {
6279 netdev_dbg(dev
, "Dropping TSO6 features since no CSUM feature.\n");
6280 features
&= ~NETIF_F_TSO6
;
6283 /* TSO ECN requires that TSO is present as well. */
6284 if ((features
& NETIF_F_ALL_TSO
) == NETIF_F_TSO_ECN
)
6285 features
&= ~NETIF_F_TSO_ECN
;
6287 /* Software GSO depends on SG. */
6288 if ((features
& NETIF_F_GSO
) && !(features
& NETIF_F_SG
)) {
6289 netdev_dbg(dev
, "Dropping NETIF_F_GSO since no SG feature.\n");
6290 features
&= ~NETIF_F_GSO
;
6293 /* UFO needs SG and checksumming */
6294 if (features
& NETIF_F_UFO
) {
6295 /* maybe split UFO into V4 and V6? */
6296 if (!((features
& NETIF_F_GEN_CSUM
) ||
6297 (features
& (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))
6298 == (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
6300 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6301 features
&= ~NETIF_F_UFO
;
6304 if (!(features
& NETIF_F_SG
)) {
6306 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6307 features
&= ~NETIF_F_UFO
;
6311 #ifdef CONFIG_NET_RX_BUSY_POLL
6312 if (dev
->netdev_ops
->ndo_busy_poll
)
6313 features
|= NETIF_F_BUSY_POLL
;
6316 features
&= ~NETIF_F_BUSY_POLL
;
6321 int __netdev_update_features(struct net_device
*dev
)
6323 netdev_features_t features
;
6328 features
= netdev_get_wanted_features(dev
);
6330 if (dev
->netdev_ops
->ndo_fix_features
)
6331 features
= dev
->netdev_ops
->ndo_fix_features(dev
, features
);
6333 /* driver might be less strict about feature dependencies */
6334 features
= netdev_fix_features(dev
, features
);
6336 if (dev
->features
== features
)
6339 netdev_dbg(dev
, "Features changed: %pNF -> %pNF\n",
6340 &dev
->features
, &features
);
6342 if (dev
->netdev_ops
->ndo_set_features
)
6343 err
= dev
->netdev_ops
->ndo_set_features(dev
, features
);
6345 if (unlikely(err
< 0)) {
6347 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6348 err
, &features
, &dev
->features
);
6353 dev
->features
= features
;
6359 * netdev_update_features - recalculate device features
6360 * @dev: the device to check
6362 * Recalculate dev->features set and send notifications if it
6363 * has changed. Should be called after driver or hardware dependent
6364 * conditions might have changed that influence the features.
6366 void netdev_update_features(struct net_device
*dev
)
6368 if (__netdev_update_features(dev
))
6369 netdev_features_change(dev
);
6371 EXPORT_SYMBOL(netdev_update_features
);
6374 * netdev_change_features - recalculate device features
6375 * @dev: the device to check
6377 * Recalculate dev->features set and send notifications even
6378 * if they have not changed. Should be called instead of
6379 * netdev_update_features() if also dev->vlan_features might
6380 * have changed to allow the changes to be propagated to stacked
6383 void netdev_change_features(struct net_device
*dev
)
6385 __netdev_update_features(dev
);
6386 netdev_features_change(dev
);
6388 EXPORT_SYMBOL(netdev_change_features
);
6391 * netif_stacked_transfer_operstate - transfer operstate
6392 * @rootdev: the root or lower level device to transfer state from
6393 * @dev: the device to transfer operstate to
6395 * Transfer operational state from root to device. This is normally
6396 * called when a stacking relationship exists between the root
6397 * device and the device(a leaf device).
6399 void netif_stacked_transfer_operstate(const struct net_device
*rootdev
,
6400 struct net_device
*dev
)
6402 if (rootdev
->operstate
== IF_OPER_DORMANT
)
6403 netif_dormant_on(dev
);
6405 netif_dormant_off(dev
);
6407 if (netif_carrier_ok(rootdev
)) {
6408 if (!netif_carrier_ok(dev
))
6409 netif_carrier_on(dev
);
6411 if (netif_carrier_ok(dev
))
6412 netif_carrier_off(dev
);
6415 EXPORT_SYMBOL(netif_stacked_transfer_operstate
);
6418 static int netif_alloc_rx_queues(struct net_device
*dev
)
6420 unsigned int i
, count
= dev
->num_rx_queues
;
6421 struct netdev_rx_queue
*rx
;
6422 size_t sz
= count
* sizeof(*rx
);
6426 rx
= kzalloc(sz
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6434 for (i
= 0; i
< count
; i
++)
6440 static void netdev_init_one_queue(struct net_device
*dev
,
6441 struct netdev_queue
*queue
, void *_unused
)
6443 /* Initialize queue lock */
6444 spin_lock_init(&queue
->_xmit_lock
);
6445 netdev_set_xmit_lockdep_class(&queue
->_xmit_lock
, dev
->type
);
6446 queue
->xmit_lock_owner
= -1;
6447 netdev_queue_numa_node_write(queue
, NUMA_NO_NODE
);
6450 dql_init(&queue
->dql
, HZ
);
6454 static void netif_free_tx_queues(struct net_device
*dev
)
6459 static int netif_alloc_netdev_queues(struct net_device
*dev
)
6461 unsigned int count
= dev
->num_tx_queues
;
6462 struct netdev_queue
*tx
;
6463 size_t sz
= count
* sizeof(*tx
);
6465 if (count
< 1 || count
> 0xffff)
6468 tx
= kzalloc(sz
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6476 netdev_for_each_tx_queue(dev
, netdev_init_one_queue
, NULL
);
6477 spin_lock_init(&dev
->tx_global_lock
);
6482 void netif_tx_stop_all_queues(struct net_device
*dev
)
6486 for (i
= 0; i
< dev
->num_tx_queues
; i
++) {
6487 struct netdev_queue
*txq
= netdev_get_tx_queue(dev
, i
);
6488 netif_tx_stop_queue(txq
);
6491 EXPORT_SYMBOL(netif_tx_stop_all_queues
);
6494 * register_netdevice - register a network device
6495 * @dev: device to register
6497 * Take a completed network device structure and add it to the kernel
6498 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6499 * chain. 0 is returned on success. A negative errno code is returned
6500 * on a failure to set up the device, or if the name is a duplicate.
6502 * Callers must hold the rtnl semaphore. You may want
6503 * register_netdev() instead of this.
6506 * The locking appears insufficient to guarantee two parallel registers
6507 * will not get the same name.
6510 int register_netdevice(struct net_device
*dev
)
6513 struct net
*net
= dev_net(dev
);
6515 BUG_ON(dev_boot_phase
);
6520 /* When net_device's are persistent, this will be fatal. */
6521 BUG_ON(dev
->reg_state
!= NETREG_UNINITIALIZED
);
6524 spin_lock_init(&dev
->addr_list_lock
);
6525 netdev_set_addr_lockdep_class(dev
);
6527 ret
= dev_get_valid_name(net
, dev
, dev
->name
);
6531 /* Init, if this function is available */
6532 if (dev
->netdev_ops
->ndo_init
) {
6533 ret
= dev
->netdev_ops
->ndo_init(dev
);
6541 if (((dev
->hw_features
| dev
->features
) &
6542 NETIF_F_HW_VLAN_CTAG_FILTER
) &&
6543 (!dev
->netdev_ops
->ndo_vlan_rx_add_vid
||
6544 !dev
->netdev_ops
->ndo_vlan_rx_kill_vid
)) {
6545 netdev_WARN(dev
, "Buggy VLAN acceleration in driver!\n");
6552 dev
->ifindex
= dev_new_index(net
);
6553 else if (__dev_get_by_index(net
, dev
->ifindex
))
6556 /* Transfer changeable features to wanted_features and enable
6557 * software offloads (GSO and GRO).
6559 dev
->hw_features
|= NETIF_F_SOFT_FEATURES
;
6560 dev
->features
|= NETIF_F_SOFT_FEATURES
;
6561 dev
->wanted_features
= dev
->features
& dev
->hw_features
;
6563 if (!(dev
->flags
& IFF_LOOPBACK
)) {
6564 dev
->hw_features
|= NETIF_F_NOCACHE_COPY
;
6567 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6569 dev
->vlan_features
|= NETIF_F_HIGHDMA
;
6571 /* Make NETIF_F_SG inheritable to tunnel devices.
6573 dev
->hw_enc_features
|= NETIF_F_SG
;
6575 /* Make NETIF_F_SG inheritable to MPLS.
6577 dev
->mpls_features
|= NETIF_F_SG
;
6579 ret
= call_netdevice_notifiers(NETDEV_POST_INIT
, dev
);
6580 ret
= notifier_to_errno(ret
);
6584 ret
= netdev_register_kobject(dev
);
6587 dev
->reg_state
= NETREG_REGISTERED
;
6589 __netdev_update_features(dev
);
6592 * Default initial state at registry is that the
6593 * device is present.
6596 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
6598 linkwatch_init_dev(dev
);
6600 dev_init_scheduler(dev
);
6602 list_netdevice(dev
);
6603 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
6605 /* If the device has permanent device address, driver should
6606 * set dev_addr and also addr_assign_type should be set to
6607 * NET_ADDR_PERM (default value).
6609 if (dev
->addr_assign_type
== NET_ADDR_PERM
)
6610 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
6612 /* Notify protocols, that a new device appeared. */
6613 ret
= call_netdevice_notifiers(NETDEV_REGISTER
, dev
);
6614 ret
= notifier_to_errno(ret
);
6616 rollback_registered(dev
);
6617 dev
->reg_state
= NETREG_UNREGISTERED
;
6620 * Prevent userspace races by waiting until the network
6621 * device is fully setup before sending notifications.
6623 if (!dev
->rtnl_link_ops
||
6624 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
6625 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U, GFP_KERNEL
);
6631 if (dev
->netdev_ops
->ndo_uninit
)
6632 dev
->netdev_ops
->ndo_uninit(dev
);
6635 EXPORT_SYMBOL(register_netdevice
);
6638 * init_dummy_netdev - init a dummy network device for NAPI
6639 * @dev: device to init
6641 * This takes a network device structure and initialize the minimum
6642 * amount of fields so it can be used to schedule NAPI polls without
6643 * registering a full blown interface. This is to be used by drivers
6644 * that need to tie several hardware interfaces to a single NAPI
6645 * poll scheduler due to HW limitations.
6647 int init_dummy_netdev(struct net_device
*dev
)
6649 /* Clear everything. Note we don't initialize spinlocks
6650 * are they aren't supposed to be taken by any of the
6651 * NAPI code and this dummy netdev is supposed to be
6652 * only ever used for NAPI polls
6654 memset(dev
, 0, sizeof(struct net_device
));
6656 /* make sure we BUG if trying to hit standard
6657 * register/unregister code path
6659 dev
->reg_state
= NETREG_DUMMY
;
6661 /* NAPI wants this */
6662 INIT_LIST_HEAD(&dev
->napi_list
);
6664 /* a dummy interface is started by default */
6665 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
6666 set_bit(__LINK_STATE_START
, &dev
->state
);
6668 /* Note : We dont allocate pcpu_refcnt for dummy devices,
6669 * because users of this 'device' dont need to change
6675 EXPORT_SYMBOL_GPL(init_dummy_netdev
);
6679 * register_netdev - register a network device
6680 * @dev: device to register
6682 * Take a completed network device structure and add it to the kernel
6683 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6684 * chain. 0 is returned on success. A negative errno code is returned
6685 * on a failure to set up the device, or if the name is a duplicate.
6687 * This is a wrapper around register_netdevice that takes the rtnl semaphore
6688 * and expands the device name if you passed a format string to
6691 int register_netdev(struct net_device
*dev
)
6696 err
= register_netdevice(dev
);
6700 EXPORT_SYMBOL(register_netdev
);
6702 int netdev_refcnt_read(const struct net_device
*dev
)
6706 for_each_possible_cpu(i
)
6707 refcnt
+= *per_cpu_ptr(dev
->pcpu_refcnt
, i
);
6710 EXPORT_SYMBOL(netdev_refcnt_read
);
6713 * netdev_wait_allrefs - wait until all references are gone.
6714 * @dev: target net_device
6716 * This is called when unregistering network devices.
6718 * Any protocol or device that holds a reference should register
6719 * for netdevice notification, and cleanup and put back the
6720 * reference if they receive an UNREGISTER event.
6721 * We can get stuck here if buggy protocols don't correctly
6724 static void netdev_wait_allrefs(struct net_device
*dev
)
6726 unsigned long rebroadcast_time
, warning_time
;
6729 linkwatch_forget_dev(dev
);
6731 rebroadcast_time
= warning_time
= jiffies
;
6732 refcnt
= netdev_refcnt_read(dev
);
6734 while (refcnt
!= 0) {
6735 if (time_after(jiffies
, rebroadcast_time
+ 1 * HZ
)) {
6738 /* Rebroadcast unregister notification */
6739 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
6745 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL
, dev
);
6746 if (test_bit(__LINK_STATE_LINKWATCH_PENDING
,
6748 /* We must not have linkwatch events
6749 * pending on unregister. If this
6750 * happens, we simply run the queue
6751 * unscheduled, resulting in a noop
6754 linkwatch_run_queue();
6759 rebroadcast_time
= jiffies
;
6764 refcnt
= netdev_refcnt_read(dev
);
6766 if (time_after(jiffies
, warning_time
+ 10 * HZ
)) {
6767 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6769 warning_time
= jiffies
;
6778 * register_netdevice(x1);
6779 * register_netdevice(x2);
6781 * unregister_netdevice(y1);
6782 * unregister_netdevice(y2);
6788 * We are invoked by rtnl_unlock().
6789 * This allows us to deal with problems:
6790 * 1) We can delete sysfs objects which invoke hotplug
6791 * without deadlocking with linkwatch via keventd.
6792 * 2) Since we run with the RTNL semaphore not held, we can sleep
6793 * safely in order to wait for the netdev refcnt to drop to zero.
6795 * We must not return until all unregister events added during
6796 * the interval the lock was held have been completed.
6798 void netdev_run_todo(void)
6800 struct list_head list
;
6802 /* Snapshot list, allow later requests */
6803 list_replace_init(&net_todo_list
, &list
);
6808 /* Wait for rcu callbacks to finish before next phase */
6809 if (!list_empty(&list
))
6812 while (!list_empty(&list
)) {
6813 struct net_device
*dev
6814 = list_first_entry(&list
, struct net_device
, todo_list
);
6815 list_del(&dev
->todo_list
);
6818 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL
, dev
);
6821 if (unlikely(dev
->reg_state
!= NETREG_UNREGISTERING
)) {
6822 pr_err("network todo '%s' but state %d\n",
6823 dev
->name
, dev
->reg_state
);
6828 dev
->reg_state
= NETREG_UNREGISTERED
;
6830 netdev_wait_allrefs(dev
);
6833 BUG_ON(netdev_refcnt_read(dev
));
6834 BUG_ON(!list_empty(&dev
->ptype_all
));
6835 BUG_ON(!list_empty(&dev
->ptype_specific
));
6836 WARN_ON(rcu_access_pointer(dev
->ip_ptr
));
6837 WARN_ON(rcu_access_pointer(dev
->ip6_ptr
));
6838 WARN_ON(dev
->dn_ptr
);
6840 if (dev
->destructor
)
6841 dev
->destructor(dev
);
6843 /* Report a network device has been unregistered */
6845 dev_net(dev
)->dev_unreg_count
--;
6847 wake_up(&netdev_unregistering_wq
);
6849 /* Free network device */
6850 kobject_put(&dev
->dev
.kobj
);
/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
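/* Illustrative sketch (hypothetical driver code, not part of dev.c): a minimal
 * ->ndo_get_stats64() as consumed by dev_get_stats() above. dev_get_stats()
 * zeroes @storage first, so the driver only fills in what it tracks. The
 * adapter counters are made up, and the returned-pointer signature assumes
 * this kernel generation of the ndo.
 */
static struct rtnl_link_stats64 *
my_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
	struct my_adapter *ad = netdev_priv(dev);

	storage->rx_packets = ad->rx_packets;
	storage->rx_bytes   = ad->rx_bytes;
	storage->tx_packets = ad->tx_packets;
	storage->tx_bytes   = ad->tx_bytes;
	storage->rx_errors  = ad->rx_errors;

	return storage;
}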
6905 struct netdev_queue
*dev_ingress_queue_create(struct net_device
*dev
)
6907 struct netdev_queue
*queue
= dev_ingress_queue(dev
);
6909 #ifdef CONFIG_NET_CLS_ACT
6912 queue
= kzalloc(sizeof(*queue
), GFP_KERNEL
);
6915 netdev_init_one_queue(dev
, queue
, NULL
);
6916 RCU_INIT_POINTER(queue
->qdisc
, &noop_qdisc
);
6917 queue
->qdisc_sleeping
= &noop_qdisc
;
6918 rcu_assign_pointer(dev
->ingress_queue
, queue
);
6923 static const struct ethtool_ops default_ethtool_ops
;
6925 void netdev_set_default_ethtool_ops(struct net_device
*dev
,
6926 const struct ethtool_ops
*ops
)
6928 if (dev
->ethtool_ops
== &default_ethtool_ops
)
6929 dev
->ethtool_ops
= ops
;
6931 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops
);
6933 void netdev_freemem(struct net_device
*dev
)
6935 char *addr
= (char *)dev
- dev
->padded
;
6941 * alloc_netdev_mqs - allocate network device
6942 * @sizeof_priv: size of private data to allocate space for
6943 * @name: device name format string
6944 * @name_assign_type: origin of device name
6945 * @setup: callback to initialize device
6946 * @txqs: the number of TX subqueues to allocate
6947 * @rxqs: the number of RX subqueues to allocate
6949 * Allocates a struct net_device with private data area for driver use
6950 * and performs basic initialization. Also allocates subqueue structs
6951 * for each queue on the device.
6953 struct net_device
*alloc_netdev_mqs(int sizeof_priv
, const char *name
,
6954 unsigned char name_assign_type
,
6955 void (*setup
)(struct net_device
*),
6956 unsigned int txqs
, unsigned int rxqs
)
6958 struct net_device
*dev
;
6960 struct net_device
*p
;
6962 BUG_ON(strlen(name
) >= sizeof(dev
->name
));
6965 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6971 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6976 alloc_size
= sizeof(struct net_device
);
6978 /* ensure 32-byte alignment of private area */
6979 alloc_size
= ALIGN(alloc_size
, NETDEV_ALIGN
);
6980 alloc_size
+= sizeof_priv
;
6982 /* ensure 32-byte alignment of whole construct */
6983 alloc_size
+= NETDEV_ALIGN
- 1;
6985 p
= kzalloc(alloc_size
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6987 p
= vzalloc(alloc_size
);
6991 dev
= PTR_ALIGN(p
, NETDEV_ALIGN
);
6992 dev
->padded
= (char *)dev
- (char *)p
;
6994 dev
->pcpu_refcnt
= alloc_percpu(int);
6995 if (!dev
->pcpu_refcnt
)
6998 if (dev_addr_init(dev
))
7004 dev_net_set(dev
, &init_net
);
7006 dev
->gso_max_size
= GSO_MAX_SIZE
;
7007 dev
->gso_max_segs
= GSO_MAX_SEGS
;
7008 dev
->gso_min_segs
= 0;
7010 INIT_LIST_HEAD(&dev
->napi_list
);
7011 INIT_LIST_HEAD(&dev
->unreg_list
);
7012 INIT_LIST_HEAD(&dev
->close_list
);
7013 INIT_LIST_HEAD(&dev
->link_watch_list
);
7014 INIT_LIST_HEAD(&dev
->adj_list
.upper
);
7015 INIT_LIST_HEAD(&dev
->adj_list
.lower
);
7016 INIT_LIST_HEAD(&dev
->all_adj_list
.upper
);
7017 INIT_LIST_HEAD(&dev
->all_adj_list
.lower
);
7018 INIT_LIST_HEAD(&dev
->ptype_all
);
7019 INIT_LIST_HEAD(&dev
->ptype_specific
);
7020 dev
->priv_flags
= IFF_XMIT_DST_RELEASE
| IFF_XMIT_DST_RELEASE_PERM
;
7023 if (!dev
->tx_queue_len
)
7024 dev
->priv_flags
|= IFF_NO_QUEUE
;
7026 dev
->num_tx_queues
= txqs
;
7027 dev
->real_num_tx_queues
= txqs
;
7028 if (netif_alloc_netdev_queues(dev
))
7032 dev
->num_rx_queues
= rxqs
;
7033 dev
->real_num_rx_queues
= rxqs
;
7034 if (netif_alloc_rx_queues(dev
))
7038 strcpy(dev
->name
, name
);
7039 dev
->name_assign_type
= name_assign_type
;
7040 dev
->group
= INIT_NETDEV_GROUP
;
7041 if (!dev
->ethtool_ops
)
7042 dev
->ethtool_ops
= &default_ethtool_ops
;
7044 nf_hook_ingress_init(dev
);
7053 free_percpu(dev
->pcpu_refcnt
);
7055 netdev_freemem(dev
);
7058 EXPORT_SYMBOL(alloc_netdev_mqs
);
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	netif_free_tx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head is not NULL, the device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *	Note: As most callers use a stack allocated list_head,
 *	we force a list_del() to make sure the stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
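/*
 * Usage sketch (illustrative): batching several unregistrations under one
 * rtnl_lock() so the expensive synchronization in rollback_registered_many()
 * is paid once.  "matches_some_condition" stands for caller-specific
 * selection logic.
 *
 *	LIST_HEAD(kill_list);
 *	struct net_device *dev, *tmp;
 *
 *	rtnl_lock();
 *	for_each_netdev_safe(net, dev, tmp)
 *		if (matches_some_condition(dev))
 *			unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */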
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
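/*
 * Ordering sketch (illustrative): the usual teardown sequence in a driver's
 * remove/exit path is unregister first (takes and releases the rtnl lock),
 * then drop the final reference:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */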
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		dev->ifindex = dev_new_index(net);

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
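/*
 * Usage sketch (illustrative): in-kernel callers hold the rtnl semaphore
 * and pass a fallback name pattern in case the current name clashes in the
 * target namespace.  "target_net" stands for a struct net obtained
 * elsewhere, e.g. via get_net_ns_by_fd() or get_net_ns_by_pid().
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 */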
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
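/*
 * Usage sketch (illustrative): an aggregating (master) device recomputing
 * its feature set from its lower devices, in the style of bonding/team.
 * FEATURES_MASK, lower and lower_list are placeholders for the master
 * driver's own mask and lower-device bookkeeping.
 *
 *	netdev_features_t features = FEATURES_MASK & ~NETIF_F_ONE_FOR_ALL;
 *
 *	list_for_each_entry(lower, &lower_list, list)
 *		features = netdev_increment_features(features,
 *						     lower->dev->features,
 *						     FEATURES_MASK);
 */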
static struct hlist_head * __net_init netdev_create_hash(void)
{
	struct hlist_head *hash;
	int i;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
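/*
 * Usage sketch (illustrative): drivers use these per-level wrappers instead
 * of raw printk() so that messages carry the bus, driver and interface
 * names automatically:
 *
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 *	netdev_err(dev, "TX timed out (driver %s)\n", netdev_drivername(dev));
 */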
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;

		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);