// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					minimum space allocation
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>

#include "net-sysfs.h"
#define MAX_GRO_SKBS 8
#define MAX_NEST_DEV 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}
static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}
/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/
/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
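
/* Example (illustrative sketch, not part of the original file): a module
 * could register a tap for every protocol with dev_add_pack(). The handler
 * "example_rcv" and the packet_type instance below are hypothetical names.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// we own the skb reference we were given
 *		return 0;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// all protocols: a tap
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);	// typically from module_init()
 *	...
 *	dev_remove_pack(&example_pt);	// from module_exit(); may sleep
 */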
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 * netdev_boot_setup_check	- check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 * netdev_boot_base	- get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
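
/* Example (illustrative, hypothetical values): the handler above parses a
 * kernel command line option of the form
 *
 *	netdev=<irq>,<io base>,<mem_start>,<mem_end>,<name>
 *
 * so booting with "netdev=9,0x300,0,0,eth0" records irq 9 and I/O base
 * 0x300 for the device that later probes as "eth0".
 */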
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/
/**
 *	dev_get_iflink	- get 'iflink' value of a interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */
int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 * dev_get_by_name_rcu	- find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
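
/* Example (illustrative sketch): the RCU variant must be bracketed by an
 * RCU read-side critical section, and the pointer must not be used after
 * rcu_read_unlock() unless a reference was taken.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		mtu = dev->mtu;		// valid only inside the RCU section
 *	rcu_read_unlock();
 */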
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
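
/* Example (illustrative sketch): the refcounted variant may be used from
 * any context; the caller owns a reference until it calls dev_put().
 *
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		netdev_info(dev, "found, ifindex %d\n", dev->ifindex);
 *		dev_put(dev);		// drop the reference taken for us
 *	}
 */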
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
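
/* Example (illustrative sketch, hypothetical address): looking up a device
 * by its MAC address under RCU protection.
 *
 *	static const char mac[ETH_ALEN] = {
 *		0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		pr_info("%s owns that address\n", dev->name);
 *	rcu_read_unlock();
 */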
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
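
/* Example (illustrative): names must fit in IFNAMSIZ and avoid path and
 * whitespace characters so they are usable as sysfs entries.
 *
 *	dev_valid_name("eth0")	-> true
 *	dev_valid_name("eth%d")	-> true  (format strings are names too)
 *	dev_valid_name("a/b")	-> false (would break sysfs paths)
 *	dev_valid_name("..")	-> false
 */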
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
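
/* Example (illustrative sketch): a driver that registers devices with a
 * wildcard name lets the core pick the first free unit number.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;	// e.g. -ENFILE when every slot is taken
 *	// dev->name is now e.g. "eth3", and err holds the unit number
 */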
int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Some auto-enslaved devices e.g. failover slaves are
	 * special, as userspace might rename the device after
	 * the interface had been brought up and running since
	 * the point kernel initiated auto-enslavement. Allow
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name change, as
	 * they are supposed to operate on master interface
	 * directly.
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);
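
/* Example (illustrative sketch): setting and clearing an alias. A zero
 * length removes the alias; the return value is the stored length.
 *
 *	dev_set_alias(dev, "uplink to core switch", 21);
 *	...
 *	dev_set_alias(dev, NULL, 0);	// clear the alias again
 */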
/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device.  Caller must make sure dev cannot go
 *	away,  e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
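
/* Example (illustrative sketch): kernel callers must hold RTNL around the
 * open, mirroring what "ip link set eth0 up" does via rtnetlink.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	// NULL: no extack for error reporting
 *	rtnl_unlock();
 */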
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}
static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}
void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
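
/* Example (illustrative sketch): a module tracking device registration.
 * "example_netdev_event" and the notifier_block are hypothetical names.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_REGISTER)
 *			pr_info("device %s registered\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 */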
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_info info = {
		.dev = dev,
		.extack = extack,
	};

	return call_netdevice_notifiers_info(val, &info);
}
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
/**
 *	call_netdevice_notifiers_mtu - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@arg: additional u32 argument passed to the notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
{
	struct netdev_notifier_info_ext info = {
		.info.dev = dev,
		.ext.mtu = arg,
	};

	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

	return call_netdevice_notifiers_info(val, &info.info);
}
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_branch_unlikely(&netstamp_needed_key))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
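
/* Example (illustrative sketch): a virtual pair device could hand frames it
 * "transmits" straight to its peer's receive path, much like veth does.
 * "example_priv" and its "peer" field are hypothetical names.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_priv(dev)->peer;
 *
 *		dev_forward_skb(peer, skb);	// skb is freed on drop
 *		return NETDEV_TX_OK;
 *	}
 */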
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{
	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->ignore_outgoing)
			continue;

		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		/* walk through the TCs and see if it falls into any of them */
		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		/* didn't find it, just return -1 to indicate no match */
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
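
/* Example (illustrative): with two traffic classes configured as
 * TC0 = {offset 0, count 4} and TC1 = {offset 4, count 4},
 * netdev_txq_to_tc(dev, 5) returns 1, since queue 5 falls into TC1's
 * offset/count range; on a device with no TCs it always returns 0.
 */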
#ifdef CONFIG_XPS
struct static_key xps_needed __read_mostly;
EXPORT_SYMBOL(xps_needed);
struct static_key xps_rxqs_needed __read_mostly;
EXPORT_SYMBOL(xps_rxqs_needed);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}
static void reset_xps_maps(struct net_device *dev,
			   struct xps_dev_maps *dev_maps,
			   bool is_rxqs_map)
{
	if (is_rxqs_map) {
		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
	} else {
		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
	}
	static_key_slow_dec_cpuslocked(&xps_needed);
	kfree_rcu(dev_maps, rcu);
}
static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
			   u16 offset, u16 count, bool is_rxqs_map)
{
	bool active = false;
	int i, j;

	for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
	     j < nr_ids;)
		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
					       count);
	if (!active)
		reset_xps_maps(dev, dev_maps, is_rxqs_map);

	if (!is_rxqs_map) {
		for (i = offset + (count - 1); count--; i--) {
			netdev_queue_numa_node_write(
				netdev_get_tx_queue(dev, i),
				NUMA_NO_NODE);
		}
	}
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	const unsigned long *possible_mask = NULL;
	struct xps_dev_maps *dev_maps;
	unsigned int nr_ids;

	if (!static_key_false(&xps_needed))
		return;

	cpus_read_lock();
	mutex_lock(&xps_map_mutex);

	if (static_key_false(&xps_rxqs_needed)) {
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		if (dev_maps) {
			nr_ids = dev->num_rx_queues;
			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
				       offset, count, true);
		}
	}

	dev_maps = xmap_dereference(dev->xps_cpus_map);
	if (!dev_maps)
		goto out_no_maps;

	if (num_possible_cpus() > 1)
		possible_mask = cpumask_bits(cpu_possible_mask);
	nr_ids = nr_cpu_ids;
	clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
		       false);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
	cpus_read_unlock();
}
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
				      u16 index, bool is_rxqs_map)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
	 *  map
	 */
	if (is_rxqs_map)
		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
	else
		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
				       cpu_to_node(attr_index));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
/* Must be called under cpus_read_lock */
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, bool is_rxqs_map)
{
	const unsigned long *online_mask = NULL, *possible_mask = NULL;
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, j, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;
	unsigned int nr_ids;

	if (dev->num_tc) {
		/* Do not allow XPS on subordinate device directly */
		num_tc = dev->num_tc;
		if (num_tc < 0)
			return -EINVAL;

		/* If queue belongs to subordinate dev use its map */
		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	mutex_lock(&xps_map_mutex);
	if (is_rxqs_map) {
		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		nr_ids = dev->num_rx_queues;
	} else {
		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
		if (num_possible_cpus() > 1) {
			online_mask = cpumask_bits(cpu_online_mask);
			possible_mask = cpumask_bits(cpu_possible_mask);
		}
		dev_maps = xmap_dereference(dev->xps_cpus_map);
		nr_ids = nr_cpu_ids;
	}

	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	/* allocate memory for queue storage */
	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
	     j < nr_ids;) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = j * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
				 NULL;

		map = expand_xps_map(map, j, index, is_rxqs_map);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	if (!dev_maps) {
		/* Increment static keys at most once per type */
		static_key_slow_inc_cpuslocked(&xps_needed);
		if (is_rxqs_map)
			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
	}

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* We need to explicitly update tci as previous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = j * num_tc + tc;

		if (netif_attr_test_mask(j, mask, nr_ids) &&
		    netif_attr_test_online(j, online_mask, nr_ids)) {
			/* add tx-queue to CPU/rx-queue maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->attr_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;

			if (!is_rxqs_map) {
				if (numa_node_id == -2)
					numa_node_id = cpu_to_node(j);
				else if (numa_node_id != cpu_to_node(j))
					numa_node_id = -1;
			}
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}
	}

	if (is_rxqs_map)
		rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
	else
		rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = xmap_dereference(dev_maps->attr_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	if (!is_rxqs_map) {
		/* update Tx queue numa node */
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
					     (numa_node_id >= 0) ?
					     numa_node_id : NUMA_NO_NODE);
	}

	if (!dev_maps)
		goto out_no_maps;

	/* removes tx-queue from unused CPUs/rx-queues */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = tc, tci = j * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!netif_attr_test_mask(j, mask, nr_ids) ||
		    !netif_attr_test_online(j, online_mask, nr_ids))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active)
		reset_xps_maps(dev, dev_maps, is_rxqs_map);

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->attr_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
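/*
 * Editorial note (not part of the original source): the XPS tables above are
 * updated copy-on-write. A fully populated new_dev_maps is built under
 * xps_map_mutex, published with rcu_assign_pointer(), and only then are the
 * superseded per-tc maps released via kfree_rcu(), so lockless readers in the
 * transmit path keep dereferencing the old tables safely until a grace
 * period has elapsed.
 */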
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	int ret;

	cpus_read_lock();
	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_set_xps_queue);
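/*
 * Usage sketch (illustrative only, not part of the original source): a
 * driver with several Tx queues could steer queue 3 to CPUs 4-7. "my_dev"
 * is a hypothetical net_device pointer.
 *
 *	cpumask_var_t cpus;
 *
 *	if (alloc_cpumask_var(&cpus, GFP_KERNEL)) {
 *		int cpu;
 *
 *		cpumask_clear(cpus);
 *		for (cpu = 4; cpu <= 7; cpu++)
 *			cpumask_set_cpu(cpu, cpus);
 *		netif_set_xps_queue(my_dev, cpus, 3);
 *		free_cpumask_var(cpus);
 *	}
 */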
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

	/* Unbind any subordinate channels */
	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev)
			netdev_unbind_sb_channel(dev, txq->sb_dev);
	}
}
void netdev_reset_tc(struct net_device *dev)
{
	netif_reset_xps_queues_gt(dev, 0);

	netdev_unbind_all_sb_channels(dev);

	/* Reset TC configuration of device */
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	netif_reset_xps_queues(dev, offset, count);

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	netif_reset_xps_queues_gt(dev, 0);

	netdev_unbind_all_sb_channels(dev);

	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

	netif_reset_xps_queues_gt(sb_dev, 0);

	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));

	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev == sb_dev)
			txq->sb_dev = NULL;
	}
}
EXPORT_SYMBOL(netdev_unbind_sb_channel);
int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset)
{
	/* Make certain the sb_dev and dev are already configured */
	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
		return -EINVAL;

	/* We cannot hand out queues we don't have */
	if ((offset + count) > dev->real_num_tx_queues)
		return -EINVAL;

	/* Record the mapping */
	sb_dev->tc_to_txq[tc].count = count;
	sb_dev->tc_to_txq[tc].offset = offset;

	/* Provide a way for the Tx queue to find the tc_to_txq map or
	 * XPS map for itself.
	 */
	while (count--)
		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;

	return 0;
}
EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{
	/* Do not use a multiqueue device to represent a subordinate channel */
	if (netif_is_multiqueue(dev))
		return -ENODEV;

	/* We allow channels 1 - 32767 to be used for subordinate channels.
	 * Channel 0 is meant to be "native" mode and used only to represent
	 * the main root device. We allow writing 0 to reset the device back
	 * to normal mode after being used as a subordinate channel.
	 */
	if (channel > S16_MAX)
		return -EINVAL;

	dev->num_tc = -channel;

	return 0;
}
EXPORT_SYMBOL(netdev_set_sb_channel);
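/*
 * Editorial note (not part of the original source): storing the channel as a
 * negative num_tc is how the stack tells a subordinate channel apart from a
 * real traffic-class configuration. For example, writing channel 5 above
 * leaves dev->num_tc == -5, which is exactly what the
 * "sb_dev->num_tc >= 0" check in netdev_bind_sb_channel_queue() requires of
 * a subordinate device.
 */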
/* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than or equal to the new real_num_tx_queues, stale skbs on the
 * qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
			netif_reset_xps_queues_gt(dev, txq);
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
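/*
 * Usage sketch (illustrative only, not part of the original source): an
 * ethtool channel handler shrinking a device from 8 to 4 queues would call
 * netif_set_real_num_tx_queues(dev, 4) under rtnl_lock. Because the new
 * count is lower, the function above resets qdiscs and XPS maps for queues
 * >= 4 so no stale skbs remain mapped to the disabled queues.
 */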
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}
void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);
void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
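/*
 * Editorial note (not part of the original source): freeing an skb is not
 * safe in hard-IRQ context, so the function above only drops the last
 * reference and parks the skb on this CPU's softnet completion_queue.
 * net_tx_action(), raised here via NET_TX_SOFTIRQ, later walks that list
 * and performs the actual free in softirq context.
 */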
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);
/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
/*
 * Returns a Tx hash based on the given packet descriptor and the number of
 * Tx queues to be used as a distribution range.
 */
static u16 skb_tx_hash(const struct net_device *dev,
		       const struct net_device *sb_dev,
		       struct sk_buff *skb)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = dev->real_num_tx_queues;

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = sb_dev->tc_to_txq[tc].offset;
		qcount = sb_dev->tc_to_txq[tc].count;
	}

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= qcount))
			hash -= qcount;
		return hash + qoffset;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
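/*
 * Editorial note (not part of the original source): reciprocal_scale() maps
 * a 32-bit hash uniformly into [0, qcount) without a division, computing
 * (u64)hash * qcount >> 32. For example, hash 0x80000000 with qcount 8
 * yields (0x80000000ULL * 8) >> 32 = 4.
 */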
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	skb_dump(KERN_WARNING, skb, false);
	WARN(1, "%s: caps=(%pNF, %pNF)\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
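/*
 * Editorial note (not part of the original source): csum_fold() can legally
 * produce 0, but a zero UDP checksum on the wire means "no checksum", so
 * the "?: CSUM_MANGLED_0" above substitutes 0xffff (the other valid
 * representation of zero in ones'-complement arithmetic) whenever the
 * folded sum happens to be 0.
 */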
int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb->data;
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
/**
 *	skb_mac_gso_segment - mac layer segmentation handler
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
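/*
 * Editorial sketch of the segmentation call chain (not part of the original
 * source): validate_xmit_skb() -> skb_gso_segment() -> __skb_gso_segment()
 * -> skb_mac_gso_segment(), which finally dispatches to the registered
 * packet_offload callback for the network protocol (e.g. inet_gso_segment
 * for IPv4), each layer peeling its own header before handing down.
 */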
/* Take action when hardware reception checksum errors are detected. */
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		skb_dump(KERN_ERR, skb, true);
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
/* XXX: check that highmem exists at all on the given machine. */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}
#endif
	return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif
static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	__be16 type;
	int tmp;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);
static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
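/*
 * Editorial note (not part of the original source): the order above matters.
 * GSO limits are applied first, then encapsulation and VLAN constraints
 * narrow the set, the driver gets a veto through ndo_features_check(), and
 * harmonize_features() finally drops checksum/GSO/SG bits the device cannot
 * honour for this protocol, so the result is always a subset of
 * dev->features that is safe for this exact skb.
 */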
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (dev_nit_active(dev))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb_mark_not_on_list(skb);
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_tx_queue_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}
int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	skb = validate_xmit_xfrm(skb, features, again);

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev, again);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, skb_transport_offset(skb),
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, skb_transport_offset(skb),
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
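/*
 * Worked example (editorial, not part of the original source): a TSO skb
 * with 7240 bytes of TCP payload, a 1448-byte gso_size and 54 bytes of
 * ethernet+IP+TCP headers has skb->len = 7294 and gso_segs = 5. The wire
 * will actually carry headers on all 5 segments, so pkt_len is adjusted to
 * 7294 + (5 - 1) * 54 = 7510 bytes for more accurate qdisc accounting.
 */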
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);

	if (q->flags & TCQ_F_NOLOCK) {
		if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
		    qdisc_run_begin(q)) {
			if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state))) {
				__qdisc_drop(skb, &to_free);
				rc = NET_XMIT_DROP;
				goto end_run;
			}
			qdisc_bstats_cpu_update(q, skb);

			rc = NET_XMIT_SUCCESS;
			if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
				__qdisc_run(q);

end_run:
			qdisc_run_end(q);
		} else {
			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
			qdisc_run(q);
		}

		if (unlikely(to_free))
			kfree_skb_list(to_free);
		return rc;
	}

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}

		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
			qdisc_run_end(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
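/*
 * Editorial note (not part of the original source): the busylock heuristic
 * trades a little latency on contended enqueues for better dequeue
 * throughput. Without it, many CPUs spinning directly on the qdisc root
 * lock would starve the CPU that currently owns qdisc->running; making the
 * contenders serialize on a separate spinlock first lets the owner retake
 * the root lock quickly and keep draining packets.
 */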
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	const struct netprio_map *map;
	const struct sock *sk;
	unsigned int prioidx;

	if (skb->priority)
		return;
	map = rcu_dereference_bh(skb->dev->priomap);
	if (!map)
		return;
	sk = skb_to_full_sk(skb);
	if (!sk)
		return;

	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);

	if (prioidx < map->priomap_len)
		skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
#endif
/**
 * dev_loopback_xmit - loop back @skb
 * @net: network namespace this loopback is happening in
 * @sk:  sk needed to be a netfilter okfn
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
	struct tcf_result cl_res;

	if (!miniq)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	mini_qdisc_bstats_cpu_update(miniq, skb);

	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */
static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
			       struct xps_dev_maps *dev_maps, unsigned int tci)
{
	struct xps_map *map;
	int queue_index = -1;

	if (dev->num_tc) {
		tci *= dev->num_tc;
		tci += netdev_get_prio_tc_map(dev, skb->priority);
	}

	map = rcu_dereference(dev_maps->attr_map[tci]);
	if (map) {
		if (map->len == 1)
			queue_index = map->queues[0];
		else
			queue_index = map->queues[reciprocal_scale(
						skb_get_hash(skb), map->len)];
		if (unlikely(queue_index >= dev->real_num_tx_queues))
			queue_index = -1;
	}
	return queue_index;
}
static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
			 struct sk_buff *skb)
{
	struct xps_dev_maps *dev_maps;
	struct sock *sk = skb->sk;
	int queue_index = -1;

	if (!static_key_false(&xps_needed))
		return -1;

	rcu_read_lock();
	if (!static_key_false(&xps_rxqs_needed))
		goto get_cpus_map;

	dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
	if (dev_maps) {
		int tci = sk_rx_queue_get(sk);

		if (tci >= 0 && tci < dev->num_rx_queues)
			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  tci);
	}

get_cpus_map:
	if (queue_index < 0) {
		dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
		if (dev_maps) {
			unsigned int tci = skb->sender_cpu - 1;

			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  tci);
		}
	}
	rcu_read_unlock();

	return queue_index;
}
dev_pick_tx_zero(struct net_device
*dev
, struct sk_buff
*skb
,
3623 struct net_device
*sb_dev
)
3627 EXPORT_SYMBOL(dev_pick_tx_zero
);
3629 u16
dev_pick_tx_cpu_id(struct net_device
*dev
, struct sk_buff
*skb
,
3630 struct net_device
*sb_dev
)
3632 return (u16
)raw_smp_processor_id() % dev
->real_num_tx_queues
;
3634 EXPORT_SYMBOL(dev_pick_tx_cpu_id
);
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
		   struct net_device *sb_dev)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	sb_dev = sb_dev ? : dev;

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, sb_dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, sb_dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(netdev_pick_tx);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev)
{
	int queue_index = 0;
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
		else
			queue_index = netdev_pick_tx(dev, skb, sb_dev);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@sb_dev: subordinate device used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 * -----------------------------------------------------------------------------------
 */
static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;
	bool again = false;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_at_ingress = 0;
# ifdef CONFIG_NET_EGRESS
	if (static_branch_unlikely(&egress_needed_key)) {
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
	}
# endif
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_core_pick_tx(dev, skb, sb_dev);
	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and shot the lock. It is not prone from deadlocks.
	 * Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {
			if (dev_xmit_recursion())
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev, &again);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				dev_xmit_recursion_inc();
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				dev_xmit_recursion_dec();
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
{
	return __dev_queue_xmit(skb, sb_dev);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	skb = validate_xmit_skb_list(skb, dev, &again);
	if (skb != orig_skb)
		goto drop;

	skb_set_queue_mapping(skb, queue_id);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}
EXPORT_SYMBOL(dev_direct_xmit);
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
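/*
 * Editorial note (not part of the original source): the
 * "10 * flow_table->mask" bound above is a heuristic. A hardware filter is
 * kept as long as the CPU it steers to has dequeued the flow's last packet
 * recently, i.e. within roughly ten table-sizes worth of backlog progress;
 * beyond that the flow is presumed dead and the driver may reclaim the
 * filter slot.
 */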
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */
/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
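/*
 * Editorial note (not part of the original source): the flow limit keeps a
 * small ring ("history") of recent flow hashes plus a per-bucket count.
 * Once the backlog is more than half full, a flow that owns more than half
 * of the FLOW_LIMIT_HISTORY samples is treated as an aggressor and its
 * packets are dropped early, protecting small flows from a single
 * high-rate flow monopolizing the per-CPU backlog queue.
 */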
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_rx_queue *rxqueue;

	rxqueue = dev->_rx;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);

			return rxqueue; /* Return first rxqueue */
		}
		rxqueue += index;
	}
	return rxqueue;
}
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
				     struct xdp_buff *xdp,
				     struct bpf_prog *xdp_prog)
{
	struct netdev_rx_queue *rxqueue;
	void *orig_data, *orig_data_end;
	u32 metalen, act = XDP_DROP;
	__be16 orig_eth_type;
	struct ethhdr *eth;
	bool orig_bcast;
	int hlen, off;
	u32 mac_len;

	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
	if (skb_cloned(skb) || skb_is_tc_redirected(skb))
		return XDP_PASS;

	/* XDP packets must be linear and must have sufficient headroom
	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
	 * native XDP provides, thus we need to do it here as well.
	 */
	if (skb_is_nonlinear(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
		int troom = skb->tail + skb->data_len - skb->end;

		/* In case we have to go down the path and also linearize,
		 * then lets do the pskb_expand_head() work just once here.
		 */
		if (pskb_expand_head(skb,
				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
			goto do_drop;
		if (skb_linearize(skb))
			goto do_drop;
	}

	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	hlen = skb_headlen(skb) + mac_len;
	xdp->data = skb->data - mac_len;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + hlen;
	xdp->data_hard_start = skb->data - skb_headroom(skb);
	orig_data_end = xdp->data_end;
	orig_data = xdp->data;
	eth = (struct ethhdr *)xdp->data;
	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
	orig_eth_type = eth->h_proto;

	rxqueue = netif_get_rxqueue(skb);
	xdp->rxq = &rxqueue->xdp_rxq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	/* check if bpf_xdp_adjust_head was used */
	off = xdp->data - orig_data;
	if (off) {
		if (off > 0)
			__skb_pull(skb, off);
		else if (off < 0)
			__skb_push(skb, -off);

		skb->mac_header += off;
		skb_reset_network_header(skb);
	}

	/* check if bpf_xdp_adjust_tail was used. it can only "shrink"
	 * the packet.
	 */
	off = orig_data_end - xdp->data_end;
	if (off != 0) {
		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
		skb->len -= off;
	}

	/* check if XDP changed eth hdr such that the SKB needs an update */
	eth = (struct ethhdr *)xdp->data;
	if ((orig_eth_type != eth->h_proto) ||
	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
	}

	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
		__skb_push(skb, mac_len);
		break;
	case XDP_PASS:
		metalen = xdp->data - xdp->data_meta;
		if (metalen)
			skb_metadata_set(skb, metalen);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
	do_drop:
		kfree_skb(skb);
		break;
	}

	return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_core_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		kfree_skb(skb);
	}
}
EXPORT_SYMBOL_GPL(generic_xdp_tx);
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);

int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;
		int err;

		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect(skb->dev, skb,
							      &xdp, xdp_prog);
				if (err)
					goto out_redir;
				break;
			case XDP_TX:
				generic_xdp_tx(skb, xdp_prog);
				break;
			}
			return XDP_DROP;
		}
	}
	return XDP_PASS;
out_redir:
	kfree_skb(skb);
	return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
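/*
 * Editorial note (not part of the original source): this "generic" XDP mode
 * is a fallback run from the core receive path after skb allocation, so it
 * is slower than a driver's native XDP hook but works on any device.
 * XDP_TX and XDP_REDIRECT verdicts are honoured by generic_xdp_tx() and
 * xdp_do_generic_redirect() respectively, bypassing the qdisc layer to
 * mimic in-driver behavior.
 */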
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);

#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	trace_netif_rx_entry(skb);

	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_rx);
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();
	trace_netif_rx_ni_exit(err);

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
static __latent_entropy void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(refcount_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);

			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
				__kfree_skb(skb);
			else
				__kfree_skb_defer(skb);
		}

		__kfree_skb_flush();
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock = NULL;

			head = head->next_sched;

			if (!(q->flags & TCQ_F_NOLOCK)) {
				root_lock = qdisc_lock(q);
				spin_lock(root_lock);
			}
			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
			smp_mb__before_atomic();
			clear_bit(__QDISC_STATE_SCHED, &q->state);
			qdisc_run(q);
			if (root_lock)
				spin_unlock(root_lock);
		}
	}

	xfrm_dev_backlog(sd);
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
static inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
	struct tcf_result cl_res;

	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
	if (!miniq)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_at_ingress = 1;
	mini_qdisc_bstats_cpu_update(miniq, skb);

	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
		__skb_push(skb, skb->mac_len);
		skb_do_redirect(skb);
		return NULL;
	case TC_ACT_CONSUMED:
		return NULL;
	default:
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return skb;
}
/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	if (dev->priv_flags & IFF_NO_RX_HANDLER)
		return -EINVAL;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
				    struct packet_type **ppt_prev)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (static_branch_unlikely(&generic_xdp_needed_key)) {
		int ret2;

		preempt_disable();
		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		preempt_enable();

		if (ret2 != XDP_PASS)
			return NET_RX_DROP;
		skb_reset_mac_len(skb);
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_branch_unlikely(&ingress_needed_key)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
	skb_reset_tc(skb);
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fall through */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
check_vlan_id:
		if (skb_vlan_tag_get_id(skb)) {
			/* Vlan id is non 0 and vlan_do_receive() above couldn't
			 * find vlan device.
			 */
			skb->pkt_type = PACKET_OTHERHOST;
		} else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
			   skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
			/* Outer header is 802.1P with vlan 0, inner header is
			 * 802.1Q or 802.1AD and vlan_do_receive() above could
			 * not find vlan dev for vlan id 0.
			 */
			__vlan_hwaccel_clear_tag(skb);
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				goto out;
			if (vlan_do_receive(&skb))
				/* After stripping off 802.1P header with vlan 0
				 * vlan dev is found for inner header.
				 */
				goto another_round;
			else if (unlikely(!skb))
				goto out;
			else
				/* We have stripped outer 802.1P vlan 0 header.
				 * But could not find vlan dev.
				 * check again for vlan id to set OTHERHOST.
				 */
				goto check_vlan_id;
		}
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		__vlan_hwaccel_clear_tag(skb);
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
			goto drop;
		*ppt_prev = pt_prev;
	} else {
drop:
		if (!deliver_exact)
			atomic_long_inc(&skb->dev->rx_dropped);
		else
			atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}
static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct net_device *orig_dev = skb->dev;
	struct packet_type *pt_prev = NULL;
	int ret;

	ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
	if (pt_prev)
		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
					 skb->dev, pt_prev, orig_dev);
	return ret;
}
/**
 * netif_receive_skb_core - special purpose version of netif_receive_skb
 * @skb: buffer to process
 *
 * More direct receive version of netif_receive_skb(). It should
 * only be used by callers that have a need to skip RPS and Generic XDP.
 * Caller must also take care of handling if (page_is_)pfmemalloc.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb_core(struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __netif_receive_skb_one_core(skb, false);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb_core);
static inline void __netif_receive_skb_list_ptype(struct list_head *head,
						  struct packet_type *pt_prev,
						  struct net_device *orig_dev)
{
	struct sk_buff *skb, *next;

	if (!pt_prev)
		return;
	if (list_empty(head))
		return;
	if (pt_prev->list_func != NULL)
		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
				   ip_list_rcv, head, pt_prev, orig_dev);
	else
		list_for_each_entry_safe(skb, next, head, list) {
			skb_list_del_init(skb);
			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
		}
}
static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{
	/* Fast-path assumptions:
	 * - There is no RX handler.
	 * - Only one packet_type matches.
	 * If either of these fails, we will end up doing some per-packet
	 * processing in-line, then handling the 'last ptype' for the whole
	 * sublist.  This can't cause out-of-order delivery to any single ptype,
	 * because the 'last ptype' must be constant across the sublist, and all
	 * other ptypes are handled per-packet.
	 */
	/* Current (common) ptype of sublist */
	struct packet_type *pt_curr = NULL;
	/* Current (common) orig_dev of sublist */
	struct net_device *od_curr = NULL;
	struct list_head sublist;
	struct sk_buff *skb, *next;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *orig_dev = skb->dev;
		struct packet_type *pt_prev = NULL;

		skb_list_del_init(skb);
		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
		if (!pt_prev)
			continue;
		if (pt_curr != pt_prev || od_curr != orig_dev) {
			/* dispatch old sublist */
			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			pt_curr = pt_prev;
			od_curr = orig_dev;
		}
		list_add_tail(&skb->list, &sublist);
	}

	/* dispatch final sublist */
	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_one_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_one_core(skb, false);

	return ret;
}
static void __netif_receive_skb_list(struct list_head *head)
{
	unsigned long noreclaim_flag = 0;
	struct sk_buff *skb, *next;
	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */

	list_for_each_entry_safe(skb, next, head, list) {
		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
			struct list_head sublist;

			/* Handle the previous sublist */
			list_cut_before(&sublist, head, &skb->list);
			if (!list_empty(&sublist))
				__netif_receive_skb_list_core(&sublist, pfmemalloc);
			pfmemalloc = !pfmemalloc;
			/* See comments in __netif_receive_skb */
			if (pfmemalloc)
				noreclaim_flag = memalloc_noreclaim_save();
			else
				memalloc_noreclaim_restore(noreclaim_flag);
		}
	}
	/* Handle the remaining sublist */
	if (!list_empty(head))
		__netif_receive_skb_list_core(head, pfmemalloc);
	/* Restore pflags */
	if (pfmemalloc)
		memalloc_noreclaim_restore(noreclaim_flag);
}
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_branch_dec(&generic_xdp_needed_key);
		} else if (new && !old) {
			static_branch_inc(&generic_xdp_needed_key);
			dev_disable_lro(dev);
			dev_disable_gro_hw(dev);
		}
		break;

	case XDP_QUERY_PROG:
		xdp->prog_id = old ? old->aux->id : 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}
static void netif_receive_skb_list_internal(struct list_head *head)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		net_timestamp_check(netdev_tstamp_prequeue, skb);
		skb_list_del_init(skb);
		if (!skb_defer_rx_timestamp(skb))
			list_add_tail(&skb->list, &sublist);
	}
	list_splice_init(&sublist, head);

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		list_for_each_entry_safe(skb, next, head, list) {
			struct rps_dev_flow voidflow, *rflow = &voidflow;
			int cpu = get_rps_cpu(skb->dev, skb, &rflow);

			if (cpu >= 0) {
				/* Will be handled, remove from list */
				skb_list_del_init(skb);
				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			}
		}
	}
#endif
	__netif_receive_skb_list(head);
	rcu_read_unlock();
}
/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	trace_netif_receive_skb_entry(skb);

	ret = netif_receive_skb_internal(skb);
	trace_netif_receive_skb_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
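
/* Illustrative sketch (not part of this file): a minimal per-packet rx path
 * as it might appear inside a driver's NAPI poll handler, i.e. in softirq
 * context as required above. example_rx_one() and its arguments are assumed
 * names.
 */
#if 0
static void example_rx_one(struct net_device *dev, const void *data,
			   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return;				/* drop silently */
	skb_put_data(skb, data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);			/* return value usually ignored */
}
#endif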
/**
 * netif_receive_skb_list - process many receive buffers from network
 * @head: list of skbs to process.
 *
 * Since return value of netif_receive_skb() is normally ignored, and
 * wouldn't be meaningful for a list, this function returns void.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return;
	if (trace_netif_receive_skb_list_entry_enabled()) {
		list_for_each_entry(skb, head, list)
			trace_netif_receive_skb_list_entry(skb);
	}
	netif_receive_skb_list_internal(head);
	trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
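
/* Illustrative sketch (not part of this file): batching skbs onto a list
 * before a single netif_receive_skb_list() call, as a driver poll loop
 * might do. example_deliver_batch() is an assumed name.
 */
#if 0
static void example_deliver_batch(struct sk_buff **skbs, int n)
{
	LIST_HEAD(list);
	int i;

	for (i = 0; i < n; i++)
		list_add_tail(&skbs[i]->list, &list);
	netif_receive_skb_list(&list);	/* consumes every skb on the list */
}
#endif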
DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();
	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);
	local_irq_enable();

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	local_bh_enable();
}
static void flush_all_backlogs(void)
{
	unsigned int cpu;

	get_online_cpus();

	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}
/* napi->gro_hash[].list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
static struct list_head *gro_list_prepare(struct napi_struct *napi,
					  struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct list_head *head;
	struct sk_buff *p;

	head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}

	return head;
}
static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
static void gro_flush_oldest(struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(oldest);
}
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *gro_head;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_head = gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
					ipv6_gro_receive, inet_gro_receive,
					gro_head, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(pp);
		napi->gro_hash[hash].count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
		gro_flush_oldest(gro_head);
	} else {
		napi->gro_hash[hash].count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, gro_head);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (napi->gro_hash[hash].count) {
		if (!test_bit(hash, &napi->gro_bitmask))
			__set_bit(hash, &napi->gro_bitmask);
	} else if (test_bit(hash, &napi->gro_bitmask)) {
		__clear_bit(hash, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	skb_ext_put(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
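
/* Illustrative sketch (not part of this file): a driver poll function
 * feeding received skbs into GRO. example_poll() and
 * example_ring_next_skb() are assumed names.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = example_ring_next_skb(napi))) {
		napi_gro_receive(napi, skb);	/* may merge, hold or deliver */
		work++;
	}
	if (work < budget)
		napi_complete_done(napi, work);
	return work;
}
#endif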
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	skb_ext_reset(skb);

	napi->skb = skb;
}
struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
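
/* Illustrative sketch (not part of this file): the frag-based receive
 * pattern. The driver fills page fragments into the skb cached by
 * napi_get_frags(), then hands it back via napi_gro_frags() (defined
 * below). page/offset/len/truesize are assumed driver-supplied values.
 */
#if 0
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;		/* allocation failed, try again later */
	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
	napi_gro_frags(napi);	/* consumes or recycles napi->skb */
#endif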
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */
static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
{
	list_add_tail(&skb->list, &napi->rx_list);
	if (++napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}
static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb);
		break;

	case GRO_DROP:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}
/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending ipi, it's better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = dev_rx_weight;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
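
/* Illustrative sketch (not part of this file): the canonical hard-irq
 * pattern built on napi_schedule_prep() and __napi_schedule_irqoff()
 * (defined just below). struct example_adapter, example_isr() and
 * example_disable_irqs() are assumed names.
 */
#if 0
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_adapter *adapter = data;

	if (napi_schedule_prep(&adapter->napi)) {
		example_disable_irqs(adapter);		/* quiesce the device */
		__napi_schedule_irqoff(&adapter->napi);	/* hard irqs masked */
	}
	return IRQ_HANDLED;
}
#endif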
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (n->gro_bitmask) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to bound somehow the time packets are kept in
		 * the GRO layer
		 */
		napi_gro_flush(n, !!timeout);
		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
	}

	gro_normal_list(n);

	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we dont take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	/* We can't gro_normal_list() here, because napi->poll() might have
	 * rearmed the napi (napi_complete_done()) in which case it could
	 * already be running on another CPU.
	 */
	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET) {
		/* As the whole budget was spent, we still own the napi so can
		 * safely handle the rx_list.
		 */
		gro_normal_list(napi);
		__napi_schedule(napi);
	}
	local_bh_enable();
}
void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
		gro_normal_list(napi);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */
static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_bitmask && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}
static void init_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		INIT_LIST_HEAD(&napi->gro_hash[i].list);
		napi->gro_hash[i].count = 0;
	}
	napi->gro_bitmask = 0;
}
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	init_gro_hash(napi);
	napi->skb = NULL;
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
				weight);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
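
/* Illustrative sketch (not part of this file): probe-time wiring of a NAPI
 * instance to the poll routine sketched earlier; netdev and adapter are
 * assumed names.
 */
#if 0
	netif_napi_add(netdev, &adapter->napi, example_poll, NAPI_POLL_WEIGHT);
	napi_enable(&adapter->napi);
#endif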
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);
static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	flush_gro_hash(napi);
	napi->gro_bitmask = 0;
}
EXPORT_SYMBOL(netif_napi_del);
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	gro_normal_list(n);

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
void net_rx_action(struct softirq_action
*h
)
6358 struct softnet_data
*sd
= this_cpu_ptr(&softnet_data
);
6359 unsigned long time_limit
= jiffies
+
6360 usecs_to_jiffies(netdev_budget_usecs
);
6361 int budget
= netdev_budget
;
6365 local_irq_disable();
6366 list_splice_init(&sd
->poll_list
, &list
);
6370 struct napi_struct
*n
;
6372 if (list_empty(&list
)) {
6373 if (!sd_has_rps_ipi_waiting(sd
) && list_empty(&repoll
))
6378 n
= list_first_entry(&list
, struct napi_struct
, poll_list
);
6379 budget
-= napi_poll(n
, &repoll
);
6381 /* If softirq window is exhausted then punt.
6382 * Allow this to run for 2 jiffies since which will allow
6383 * an average latency of 1.5/HZ.
6385 if (unlikely(budget
<= 0 ||
6386 time_after_eq(jiffies
, time_limit
))) {
6392 local_irq_disable();
6394 list_splice_tail_init(&sd
->poll_list
, &list
);
6395 list_splice_tail(&repoll
, &list
);
6396 list_splice(&list
, &sd
->poll_list
);
6397 if (!list_empty(&sd
->poll_list
))
6398 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
6400 net_rps_action_and_irq_enable(sd
);
6402 __kfree_skb_flush();
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* lookup ignore flag */
	bool ignore;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}
static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master) && !upper->ignore)
		return upper->dev;
	return NULL;
}

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}
void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{
	struct netdev_adjacent *upper;

	upper = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;
	*ignore = upper->ignore;

	return upper->dev;
}

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
static int __netdev_walk_all_upper_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
						 void *data),
				       void *data)
{
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;
	bool ignore;

	now = dev;
	iter = &dev->adj_list.upper;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			udev = __netdev_next_upper_dev(now, &iter, &ignore);
			if (!udev)
				break;
			if (ignore)
				continue;

			next = udev;
			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.upper;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			udev = netdev_next_upper_dev_rcu(now, &iter);
			if (!udev)
				break;

			next = udev;
			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
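
/* Illustrative sketch (not part of this file): counting every upper device
 * with the RCU walker above. example_count_upper() and
 * example_upper_count() are assumed names.
 */
#if 0
static int example_count_upper(struct net_device *upper, void *data)
{
	(*(int *)data)++;
	return 0;		/* non-zero would abort the walk */
}

static int example_upper_count(struct net_device *dev)
{
	int n = 0;

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, example_count_upper, &n);
	rcu_read_unlock();
	return n;
}
#endif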
static bool __netdev_has_upper_dev(struct net_device *dev,
				   struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
					   upper_dev);
}

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);
/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);
static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;
	*ignore = lower->ignore;

	return lower->dev;
}
int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = netdev_next_lower_dev(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static int __netdev_walk_all_lower_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
						 void *data),
				       void *data)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;
	bool ignore;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
			if (!ldev)
				break;
			if (ignore)
				continue;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
static u8 __netdev_upper_depth(struct net_device *dev)
{
	struct net_device *udev;
	struct list_head *iter;
	u8 max_depth = 0;
	bool ignore;

	for (iter = &dev->adj_list.upper,
	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
	     udev;
	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
		if (ignore)
			continue;
		if (max_depth < udev->upper_level)
			max_depth = udev->upper_level;
	}

	return max_depth;
}

static u8 __netdev_lower_depth(struct net_device *dev)
{
	struct net_device *ldev;
	struct list_head *iter;
	u8 max_depth = 0;
	bool ignore;

	for (iter = &dev->adj_list.lower,
	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
	     ldev;
	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
		if (ignore)
			continue;
		if (max_depth < ldev->lower_level)
			max_depth = ldev->lower_level;
	}

	return max_depth;
}

static int __netdev_update_upper_level(struct net_device *dev, void *data)
{
	dev->upper_level = __netdev_upper_depth(dev) + 1;
	return 0;
}

static int __netdev_update_lower_level(struct net_device *dev, void *data)
{
	dev->lower_level = __netdev_lower_depth(dev) + 1;
	return 0;
}
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = netdev_next_lower_dev_rcu(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}
static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	adj->ignore = false;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}
static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}
static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
			.extack = extack,
		},
		.upper_dev = upper_dev,
		.master = master,
		.linking = true,
		.upper_info = upper_info,
	};
	struct net_device *master_dev;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
		return -EMLINK;

	if (!master) {
		if (__netdev_has_upper_dev(dev, upper_dev))
			return -EEXIST;
	} else {
		master_dev = __netdev_master_upper_dev_get(dev);
		if (master_dev)
			return master_dev == upper_dev ? -EEXIST : -EBUSY;
	}

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, NULL);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
				    NULL);

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, extack);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
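
/* Example (not part of this file): a minimal sketch of how a bonding-style
 * stacking driver might use the two helpers above. All mydrv_* names are
 * hypothetical; error handling is trimmed to the essentials. Assumes the
 * caller holds the RTNL lock, as both helpers require.
 */
#if 0	/* illustrative only */
static int mydrv_enslave(struct net_device *master, struct net_device *slave,
			 struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	/* Link @slave under @master as its single master upper device;
	 * no upper_priv/upper_info is passed in this simple case.
	 */
	return netdev_master_upper_dev_link(slave, master, NULL, NULL, extack);
}

static void mydrv_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave, master);
}
#endif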
/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
		},
		.upper_dev = upper_dev,
		.linking = false,
	};

	ASSERT_RTNL();

	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
				      &changeupper_info.info);

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, NULL);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
				    NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
				      struct net_device *lower_dev,
				      bool val)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
	if (adj)
		adj->ignore = val;

	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
	if (adj)
		adj->ignore = val;
}

static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
					struct net_device *lower_dev)
{
	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
}

static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
				       struct net_device *lower_dev)
{
	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
}
int netdev_adjacent_change_prepare(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev,
				   struct netlink_ext_ack *extack)
{
	int err;

	if (!new_dev)
		return 0;

	if (old_dev && new_dev != old_dev)
		netdev_adjacent_dev_disable(dev, old_dev);

	err = netdev_upper_dev_link(new_dev, dev, extack);
	if (err) {
		if (old_dev && new_dev != old_dev)
			netdev_adjacent_dev_enable(dev, old_dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_adjacent_change_prepare);

void netdev_adjacent_change_commit(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev)
{
	if (!new_dev || !old_dev)
		return;

	if (new_dev == old_dev)
		return;

	netdev_adjacent_dev_enable(dev, old_dev);
	netdev_upper_dev_unlink(old_dev, dev);
}
EXPORT_SYMBOL(netdev_adjacent_change_commit);

void netdev_adjacent_change_abort(struct net_device *old_dev,
				  struct net_device *new_dev,
				  struct net_device *dev)
{
	if (!new_dev)
		return;

	if (old_dev && new_dev != old_dev)
		netdev_adjacent_dev_enable(dev, old_dev);

	netdev_upper_dev_unlink(new_dev, dev);
}
EXPORT_SYMBOL(netdev_adjacent_change_abort);
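
/* Example (not part of this file): the prepare/commit/abort trio above is
 * meant to be used as a transaction when swapping the active lower device
 * under @dev. A hedged sketch with invented mydrv_* names, RTNL held:
 */
#if 0	/* illustrative only */
static int mydrv_change_active(struct net_device *dev,
			       struct net_device *old_dev,
			       struct net_device *new_dev,
			       struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();
	err = netdev_adjacent_change_prepare(old_dev, new_dev, dev, extack);
	if (err)
		return err;

	if (mydrv_apply_hw_config(dev, new_dev)) {	/* hypothetical step */
		netdev_adjacent_change_abort(old_dev, new_dev, dev);
		return -EIO;
	}

	netdev_adjacent_change_commit(old_dev, new_dev, dev);
	return 0;
}
#endif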
/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info = {
		.info.dev = dev,
	};

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);
static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev)
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = 0;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower);
		if (max_nest < nest)
			max_nest = nest;
	}

	return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);
/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
		.info.dev = lower_dev,
	};

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(audit_context(), GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
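
/* Example (not part of this file): a packet-capture style user takes one
 * promiscuity reference while capturing and drops it afterwards; the
 * device only leaves promiscuous mode when the count returns to zero.
 * Sketch with invented names, RTNL held by the caller:
 */
#if 0	/* illustrative only */
static int mycap_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* count++ */
}

static void mycap_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);	/* count--, may leave promisc mode */
}
#endif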
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
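
/* Example (not part of this file): a protocol that must see every
 * multicast frame holds an allmulti reference for the lifetime of that
 * need, mirroring the promiscuity pattern above. Sketch, invented names:
 */
#if 0	/* illustrative only */
static int myproto_attach(struct net_device *dev)
{
	return dev_set_allmulti(dev, 1);
}

static void myproto_detach(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);
}
#endif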
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
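
/* Example (not part of this file): decoding the combined flag word.
 * IFF_RUNNING/IFF_LOWER_UP/IFF_DORMANT are synthesized above from
 * operstate rather than stored in dev->flags. Sketch, invented name:
 */
#if 0	/* illustrative only */
static void mydrv_log_state(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	pr_info("%s: %s, lower layer %s\n", dev->name,
		(flags & IFF_UP) ? "up" : "down",
		(flags & IFF_LOWER_UP) ? "up" : "down");
}
#endif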
int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {
		if (old_flags & IFF_UP)
			__dev_close(dev);
		else
			ret = __dev_open(dev, extack);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC, when
	 * IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info = {
			.info = {
				.dev = dev,
			},
			.flags_changed = changes,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
	}
}
/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *	@extack: netlink extended ack
 *
 *	Change settings on device based state flags. The flags are
 *	in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags, extack);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
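
/* Example (not part of this file): bringing an interface up the way an
 * ioctl/netlink handler would, by setting IFF_UP through the userspace
 * flag interface above. Sketch, invented name, RTNL held by the caller:
 */
#if 0	/* illustrative only */
static int my_force_up(struct net_device *dev,
		       struct netlink_ext_ack *extack)
{
	unsigned int flags = dev_get_flags(dev) | IFF_UP;

	return dev_change_flags(dev, flags, extack);
}
#endif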
int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

/**
 *	dev_set_mtu_ext - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *	@extack: netlink extended ack
 *
 *	Change the maximum transfer size of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						   orig_mtu);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						     new_mtu);
		}
	}
	return err;
}
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	struct netlink_ext_ack extack;
	int err;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
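
/* Example (not part of this file): dev_set_mtu() validates the request
 * against dev->min_mtu/max_mtu and runs the PRE/CHANGEMTU notifier dance
 * shown above, so a caller only needs the one call. Sketch, invented
 * name, RTNL held:
 */
#if 0	/* illustrative only */
static int my_reset_mtu(struct net_device *dev)
{
	return dev_set_mtu(dev, 1500);	/* classic Ethernet default */
}
#endif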
/**
 *	dev_change_tx_queue_len - Change TX queue length of a netdevice
 *	@dev: device
 *	@new_len: new tx queue length
 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)
		return -ERANGE;

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res)
			goto err_rollback;
		res = dev_qdisc_change_tx_queue_len(dev);
		if (res)
			goto err_rollback;
	}

	return 0;

err_rollback:
	netdev_err(dev, "refused to change device tx_queue_len\n");
	dev->tx_queue_len = orig_len;
	return res;
}

/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 *	@dev: device
 *	@addr: new address
 *	@extack: netlink extended ack
 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
{
	struct netdev_notifier_pre_changeaddr_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.dev_addr = addr,
	};
	int rc;

	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
	return notifier_to_errno(rc);
}
EXPORT_SYMBOL(dev_pre_changeaddr_notify);
/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *	@extack: netlink extended ack
 *
 *	Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
	if (err)
		return err;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
/**
 *	dev_change_carrier - Change device carrier
 *	@dev: device
 *	@new_carrier: new value
 *
 *	Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 *	dev_get_phys_port_id - Get device physical port ID
 *	@dev: device
 *	@ppid: port ID
 *
 *	Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 *	dev_get_phys_port_name - Get device physical port name
 *	@dev: device
 *	@name: port name
 *	@len: limit of bytes to copy to name
 *
 *	Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (ops->ndo_get_phys_port_name) {
		err = ops->ndo_get_phys_port_name(dev, name, len);
		if (err != -EOPNOTSUPP)
			return err;
	}
	return devlink_compat_phys_port_name_get(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
 *	dev_get_port_parent_id - Get the device's port parent identifier
 *	@dev: network device
 *	@ppid: pointer to a storage for the port's parent identifier
 *	@recurse: allow/disallow recursion to lower devices
 *
 *	Get the devices's port parent identifier
 */
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid,
			   bool recurse)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netdev_phys_item_id first = { };
	struct net_device *lower_dev;
	struct list_head *iter;
	int err;

	if (ops->ndo_get_port_parent_id) {
		err = ops->ndo_get_port_parent_id(dev, ppid);
		if (err != -EOPNOTSUPP)
			return err;
	}

	err = devlink_compat_switch_id_get(dev, ppid);
	if (!err || err != -EOPNOTSUPP)
		return err;

	if (!recurse)
		return -EOPNOTSUPP;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
		if (err)
			break;
		if (!first.id_len)
			first = *ppid;
		else if (memcmp(&first, ppid, sizeof(*ppid)))
			return -EOPNOTSUPP;
	}

	return err;
}
EXPORT_SYMBOL(dev_get_port_parent_id);

/**
 *	netdev_port_same_parent_id - Indicate if two network devices have
 *	the same port parent identifier
 *	@a: first network device
 *	@b: second network device
 */
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id a_id = { };
	struct netdev_phys_item_id b_id = { };

	if (dev_get_port_parent_id(a, &a_id, true) ||
	    dev_get_port_parent_id(b, &b_id, true))
		return false;

	return netdev_phys_item_id_same(&a_id, &b_id);
}
EXPORT_SYMBOL(netdev_port_same_parent_id);
/**
 *	dev_change_proto_down - update protocol port state information
 *	@dev: device
 *	@proto_down: new value
 *
 *	This info can be used by switch drivers to set the phys state of the
 *	port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

/**
 *	dev_change_proto_down_generic - generic implementation for
 *	ndo_change_proto_down that sets carrier according to
 *	proto_down.
 *
 *	@dev: device
 *	@proto_down: new value
 */
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
{
	if (proto_down)
		netif_carrier_off(dev);
	else
		netif_carrier_on(dev);
	dev->proto_down = proto_down;
	return 0;
}
EXPORT_SYMBOL(dev_change_proto_down_generic);
u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
		    enum bpf_netdev_command cmd)
{
	struct netdev_bpf xdp;

	if (!bpf_op)
		return 0;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = cmd;

	/* Query must always succeed. */
	WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);

	return xdp.prog_id;
}

static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
			   struct netlink_ext_ack *extack, u32 flags,
			   struct bpf_prog *prog)
{
	struct netdev_bpf xdp;

	memset(&xdp, 0, sizeof(xdp));
	if (flags & XDP_FLAGS_HW_MODE)
		xdp.command = XDP_SETUP_PROG_HW;
	else
		xdp.command = XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	return bpf_op(dev, &xdp);
}

static void dev_xdp_uninstall(struct net_device *dev)
{
	struct netdev_bpf xdp;
	bpf_op_t ndo_bpf;

	/* Remove generic XDP */
	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));

	/* Remove from the driver */
	ndo_bpf = dev->netdev_ops->ndo_bpf;
	if (!ndo_bpf)
		return;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG;
	WARN_ON(ndo_bpf(dev, &xdp));
	if (xdp.prog_id)
		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
					NULL));

	/* Remove HW offload */
	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG_HW;
	if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
					NULL));
}
/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	enum bpf_netdev_command query;
	struct bpf_prog *prog = NULL;
	bpf_op_t bpf_op, bpf_chk;
	bool offload;
	int err;

	ASSERT_RTNL();

	offload = flags & XDP_FLAGS_HW_MODE;
	query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;

	bpf_op = bpf_chk = ops->ndo_bpf;
	if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
		NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
		return -EOPNOTSUPP;
	}
	if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
		bpf_op = generic_xdp_install;
	if (bpf_op == bpf_chk)
		bpf_chk = generic_xdp_install;

	if (fd >= 0) {
		u32 prog_id;

		if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
			NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
			return -EEXIST;
		}

		prog_id = __dev_xdp_query(dev, bpf_op, query);
		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
			NL_SET_ERR_MSG(extack, "XDP program already attached");
			return -EBUSY;
		}

		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
					     bpf_op == ops->ndo_bpf);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
			NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
			bpf_prog_put(prog);
			return -EINVAL;
		}

		if (prog->aux->id == prog_id) {
			bpf_prog_put(prog);
			return 0;
		}
	} else {
		if (!__dev_xdp_query(dev, bpf_op, query))
			return 0;
	}

	err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
	if (err < 0 && prog)
		bpf_prog_put(prog);

	return err;
}
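
/* Example (not part of this file): dev_change_xdp_fd() is driven from the
 * rtnetlink IFLA_XDP handler; a caller holding the RTNL lock could attach
 * a generic-mode program from a file descriptor like this. Sketch,
 * invented name:
 */
#if 0	/* illustrative only */
static int my_attach_xdp(struct net_device *dev, int prog_fd,
			 struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	/* XDP_FLAGS_SKB_MODE forces the generic (skb-based) hook */
	return dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_SKB_MODE);
}
#endif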
/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregisteration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		dev_xdp_uninstall(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
		}
	}
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
					!(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
					 !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware.  If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	return features;
}
int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
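
/* Example (not part of this file): a driver whose offload capability
 * changes at runtime (say, after a firmware reload) adjusts its
 * hw_features and then asks the core to re-evaluate via the helper
 * above. Sketch, invented names, RTNL held:
 */
#if 0	/* illustrative only */
static void mydrv_fw_reloaded(struct net_device *dev, bool rxcsum_ok)
{
	if (rxcsum_ok)
		dev->hw_features |= NETIF_F_RXCSUM;
	else
		dev->hw_features &= ~NETIF_F_RXCSUM;

	netdev_update_features(dev);	/* re-runs __netdev_update_features */
}
#endif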
/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device(a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);
	int err = 0;

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++) {
		rx[i].dev = dev;

		/* XDP RX-queue setup */
		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
		if (err < 0)
			goto err_rxq_info;
	}
	return 0;

err_rxq_info:
	/* Rollback successful reg's and free other resources */
	while (i--)
		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
	kvfree(dev->_rx);
	dev->_rx = NULL;
	return err;
}

static void netif_free_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;

	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
	if (!dev->_rx)
		return;

	for (i = 0; i < count; i++)
		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

	kvfree(dev->_rx);
}
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
static void netdev_register_lockdep_key(struct net_device *dev)
{
	lockdep_register_key(&dev->qdisc_tx_busylock_key);
	lockdep_register_key(&dev->qdisc_running_key);
	lockdep_register_key(&dev->qdisc_xmit_lock_key);
	lockdep_register_key(&dev->addr_list_lock_key);
}

static void netdev_unregister_lockdep_key(struct net_device *dev)
{
	lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
	lockdep_unregister_key(&dev->qdisc_running_key);
	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
	lockdep_unregister_key(&dev->addr_list_lock_key);
}

void netdev_update_lockdep_key(struct net_device *dev)
{
	struct netdev_queue *queue;
	int i;

	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
	lockdep_unregister_key(&dev->addr_list_lock_key);

	lockdep_register_key(&dev->qdisc_xmit_lock_key);
	lockdep_register_key(&dev->addr_list_lock_key);

	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
	for (i = 0; i < dev->num_tx_queues; i++) {
		queue = netdev_get_tx_queue(dev, i);

		lockdep_set_class(&queue->_xmit_lock,
				  &dev->qdisc_xmit_lock_key);
	}
}
EXPORT_SYMBOL(netdev_update_lockdep_key);
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
		     NETDEV_FEATURE_COUNT);
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->netdev_ops->ndo_udp_tunnel_add) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		rcu_barrier();

		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initialize the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note : We dont allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' dont need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	if (rtnl_lock_killable())
		return -EINTR;
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
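
/* Example (not part of this file): the classic probe/remove pairing seen
 * in Ethernet drivers. alloc_etherdev() is the ether_setup() convenience
 * wrapper around the allocator further below; all mydrv_* names are
 * hypothetical.
 */
#if 0	/* illustrative only */
#include <linux/etherdevice.h>

static int mydrv_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct mydrv_priv));	/* hypothetical priv */
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &mydrv_netdev_ops;	/* hypothetical ops table */

	err = register_netdev(dev);		/* takes the rtnl itself */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}

static void mydrv_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}
#endif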
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
#if IS_ENABLED(CONFIG_DECNET)
		WARN_ON(dev->dn_ptr);
#endif
		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + sizeof(*netdev_stats), 0,
	       sizeof(*stats64) - sizeof(*netdev_stats));
#else
	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
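
/* Example (not part of this file): snapshotting device counters. The
 * storage must be supplied by the caller, since dev_get_stats() fills
 * and returns @storage. Sketch, invented name:
 */
#if 0	/* illustrative only */
static void my_dump_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	pr_info("%s: rx %llu tx %llu\n", dev->name,
		(unsigned long long)stats.rx_packets,
		(unsigned long long)stats.tx_packets);
}
#endif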
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	unsigned int alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	netdev_register_lockdep_key(dev);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->upper_level = 1;
	dev->lower_level = 1;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
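
/* Illustrative sketch (not part of the original file): drivers normally
 * reach this function through wrappers such as alloc_etherdev(); the
 * private struct and queue counts below are hypothetical:
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct example_priv), "eth%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */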

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();
	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	netdev_unregister_lockdep_key(dev);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
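
/* Illustrative sketch (not part of the original file): the usual driver
 * teardown order is unregister first, then free, mirroring the allocation
 * example above; unregister_netdev() takes and drops the rtnl semaphore
 * internally:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */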

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 *
 * Note: As most callers use a stack allocated list_head,
 * we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
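
/* Illustrative sketch (not part of the original file): callers already
 * holding rtnl_lock can batch several removals through the queue/many
 * pair instead of the wrapper above; "a" and "b" are hypothetical devices:
 *
 *	LIST_HEAD(kill_list);
 *
 *	unregister_netdevice_queue(a, &kill_list);
 *	unregister_netdevice_queue(b, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */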

/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err, new_nsid, new_ifindex;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_get_valid_name(net, dev, pat);
		if (err < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice() and
	 * unregister_netdevice().
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net);
	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		new_ifindex = dev_new_index(net);
	else
		new_ifindex = dev->ifindex;

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
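
/* Illustrative sketch (not part of the original file): an in-kernel caller
 * moving a device into another namespace, passing a fallback name pattern
 * for conflicts; "dev" and "target_net" are hypothetical and rtnl_lock
 * must already be held:
 *
 *	int err;
 *
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	if (err)
 *		netdev_err(dev, "move failed: %d\n", err);
 */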

static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
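
/* Illustrative sketch (not part of the original file): bonding-style
 * masters fold each slave's feature set into their own with this helper;
 * the hypothetical slave list below is a simplification of what the
 * bonding driver does:
 *
 *	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *
 *	list_for_each_entry(slave, &master_priv->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     master->features);
 */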

static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));

	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
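
/* Illustrative sketch (not part of the original file): drivers use these
 * level helpers instead of raw printk() so messages carry the device and
 * driver name automatically; "speed" is a hypothetical local:
 *
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 *	netdev_err(dev, "DMA mapping failed\n");
 */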

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (__dev_get_by_name(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special. If any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);