// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      NET3    Protocol independent device support routines.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *      Paul Rusty Russell      :       SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>

#include "net-sysfs.h"
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;       /* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
                                         struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
                                           struct net_device *dev,
                                           struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0)
                ;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

/*******************************************************************************
 *
 *              Protocol management and registration routines
 *
 *******************************************************************************/
/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if protocol handler, mangling packet, will
 *      be the first on list, it is not able to sense, that packet
 *      is cloned and should be copied-on-write, so that it will
 *      change it and subsequent readers will get broken packet.
 *                                                      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return pt->dev ? &pt->dev->ptype_all : &ptype_all;
        else
                return pt->dev ? &pt->dev->ptype_specific :
                                 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep therefore it can not
 *      guarantee all CPU's that are in middle of receiving packets
 *      will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
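
/*
 * Example (a hypothetical sketch, not part of this file; the names
 * my_pkt_rcv/my_pt are illustrative): a module that wants to see every
 * frame would register an ETH_P_ALL handler. The handler receives its
 * own reference to the skb and must consume it.
 *
 *      static int my_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
 *                            struct packet_type *pt,
 *                            struct net_device *orig_dev)
 *      {
 *              // inspect skb here ...
 *              kfree_skb(skb);         // consume our reference
 *              return 0;
 *      }
 *
 *      static struct packet_type my_pt __read_mostly = {
 *              .type = htons(ETH_P_ALL),
 *              .func = my_pkt_rcv,
 *      };
 *
 *      dev_add_pack(&my_pt);           // register
 *      dev_remove_pack(&my_pt);        // unregister; sleeps until safe
 */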
/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPU's have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &proto_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep therefore it can not
 *      guarantee all CPU's that are in middle of receiving packets
 *      will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct packet_offload *elem;

        spin_lock(&offload_lock);
        list_for_each_entry(elem, &offload_base, list) {
                if (po->priority < elem->priority)
                        break;
        }
        list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
/**
 *      __dev_remove_offload - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed &offload_type
 *      is removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPU's have gone
 *      through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}
/**
 *      dev_remove_offload - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &offload_type is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************
 *
 *                    Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine to
 *      all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
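
/*
 * Example (illustrative): booting with "netdev=5,0x300,0,0,eth0" stores
 * irq 5 and base_addr 0x300 under the name "eth0"; a legacy probe then
 * picks the values up via netdev_boot_setup_check().
 */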
/*******************************************************************************
 *
 *                          Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *      dev_get_iflink - get 'iflink' value of a interface
 *      @dev: targeted interface
 *
 *      Indicates the ifindex the interface is linked to.
 *      Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
        if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
                return dev->netdev_ops->ndo_get_iflink(dev);

        return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *      dev_fill_metadata_dst - Retrieve tunnel egress information.
 *      @dev: targeted interface
 *      @skb: The packet.
 *
 *      For better visibility of tunnel traffic OVS needs to retrieve
 *      egress tunnel information for a packet. Following API allows
 *      user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info;

        if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
                return -EINVAL;

        info = skb_tunnel_info_unclone(skb);
        if (!info)
                return -ENOMEM;
        if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
                return -EINVAL;

        return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *      dev_get_by_name_rcu - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name.
 *      If the name is found a pointer to the device is returned.
 *      If the name is not found then %NULL is returned.
 *      The reference counters are not incremented so the caller must be
 *      careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
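
/*
 * Example (hypothetical caller, illustrative only): a process-context
 * lookup takes a reference that must be dropped with dev_put().
 *
 *      struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *      if (dev) {
 *              ... use dev ...
 *              dev_put(dev);
 *      }
 */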
/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
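
/*
 * Example (illustrative): the _rcu lookups are for short read-side
 * sections where no reference is taken; the returned pointer is only
 * valid inside the RCU critical section.
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_index_rcu(net, ifindex);
 *      if (dev)
 *              mtu = dev->mtu;
 *      rcu_read_unlock();
 */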
/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *      dev_get_by_napi_id - find a device by napi_id
 *      @napi_id: ID of the NAPI struct
 *
 *      Search for an interface by NAPI ID. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not had
 *      its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
        struct napi_struct *napi;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (napi_id < MIN_NAPI_ID)
                return NULL;

        napi = napi_by_id(napi_id);

        return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
/**
 *      netdev_get_name - get a netdevice name, knowing its ifindex.
 *      @net: network namespace
 *      @name: a pointer to the buffer where the name will be stored.
 *      @ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
        struct net_device *dev;
        int ret;

        down_read(&devnet_rename_sem);
        rcu_read_lock();

        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                ret = -ENODEV;
                goto out;
        }

        strcpy(name, dev->name);

        ret = 0;
out:
        rcu_read_unlock();
        up_read(&devnet_rename_sem);
        return ret;
}
/**
 *      dev_getbyhwaddr_rcu - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device.
 *      The caller must hold RCU or RTNL.
 *      The returned device has not had its ref count increased
 *      and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *      __dev_get_by_flags - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. Must be called inside
 *      rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
                                      unsigned short mask)
{
        struct net_device *dev, *ret;

        ASSERT_RTNL();

        ret = NULL;
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || *name == ':' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);
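
/*
 * Illustrative cases: "eth0" and "wlan-1" are valid; "" (empty), "." and
 * "..", anything containing '/' or ':' or whitespace, and any name of
 * IFNAMSIZ (16) or more characters are rejected.
 */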
/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        if (!dev_valid_name(name))
                return -EINVAL;

        p = strchr(name, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}
static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        BUG_ON(!net);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
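
/*
 * Example (illustrative): a driver registering devices as "foo%d" calls
 * dev_alloc_name(dev, "foo%d"); with foo0 and foo2 already present, the
 * first free slot is picked, dev->name becomes "foo1" and 1 is returned.
 */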
int dev_get_valid_name(struct net *net, struct net_device *dev,
                       const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change name of a device, can pass format strings "eth%d".
 *      for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        unsigned char old_assign_type;
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);

        /* Some auto-enslaved devices e.g. failover slaves are
         * special, as userspace might rename the device after
         * the interface had been brought up and running since
         * the point kernel initiated auto-enslavement. Allow
         * live name change even when these slave devices are
         * up and running.
         *
         * Typically, users of these auto-enslaving devices
         * don't actually care about slave name change, as
         * they are supposed to operate on master interface
         * directly.
         */
        if (dev->flags & IFF_UP &&
            likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
                return -EBUSY;

        down_write(&devnet_rename_sem);

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
                up_write(&devnet_rename_sem);
                return 0;
        }

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
                up_write(&devnet_rename_sem);
                return err;
        }

        if (oldname[0] && !strchr(oldname, '%'))
                netdev_info(dev, "renamed from %s\n", oldname);

        old_assign_type = dev->name_assign_type;
        dev->name_assign_type = NET_NAME_RENAMED;

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                dev->name_assign_type = old_assign_type;
                up_write(&devnet_rename_sem);
                return ret;
        }

        up_write(&devnet_rename_sem);

        netdev_adjacent_rename_links(dev, oldname);

        write_lock_bh(&dev_base_lock);
        hlist_del_rcu(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        down_write(&devnet_rename_sem);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        memcpy(oldname, newname, IFNAMSIZ);
                        dev->name_assign_type = old_assign_type;
                        old_assign_type = NET_NAME_RENAMED;
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
                               dev->name, ret);
                }
        }

        return err;
}
/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        struct dev_ifalias *new_alias = NULL;

        if (len >= IFALIASZ)
                return -EINVAL;

        if (len) {
                new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
                if (!new_alias)
                        return -ENOMEM;

                memcpy(new_alias->ifalias, alias, len);
                new_alias->ifalias[len] = 0;
        }

        mutex_lock(&ifalias_mutex);
        rcu_swap_protected(dev->ifalias, new_alias,
                           mutex_is_locked(&ifalias_mutex));
        mutex_unlock(&ifalias_mutex);

        if (new_alias)
                kfree_rcu(new_alias, rcuhead);

        return len;
}
EXPORT_SYMBOL(dev_set_alias);
/**
 *      dev_get_alias - get ifalias of a device
 *      @dev: device
 *      @name: buffer to store name of ifalias
 *      @len: size of buffer
 *
 *      get ifalias for a device.  Caller must make sure dev cannot go
 *      away,  e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
        const struct dev_ifalias *alias;
        int ret = 0;

        rcu_read_lock();
        alias = rcu_dereference(dev->ifalias);
        if (alias)
                ret = snprintf(name, len, "%s", alias->ifalias);
        rcu_read_unlock();

        return ret;
}
/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                struct netdev_notifier_change_info change_info = {
                        .info.dev = dev,
                };

                call_netdevice_notifiers_info(NETDEV_CHANGE,
                                              &change_info.info);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        ASSERT_RTNL();

        if (!netif_device_present(dev))
                return -ENODEV;

        /* Block netpoll from trying to do any rx path servicing.
         * If we don't do this there is a chance ndo_poll_controller
         * or ndo_poll may be running while we open the device
         */
        netpoll_poll_disable(dev);

        ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        netpoll_poll_enable(dev);

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                dev->flags |= IFF_UP;
                dev_set_rx_mode(dev);
                dev_activate(dev);
                add_device_randomness(dev->dev_addr, dev->addr_len);
        }

        return ret;
}
/**
 *      dev_open - prepare an interface for use.
 *      @dev: device to open
 *      @extack: netlink extended ack
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
        int ret;

        if (dev->flags & IFF_UP)
                return 0;

        ret = __dev_open(dev, extack);
        if (ret < 0)
                return ret;

        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
        call_netdevice_notifiers(NETDEV_UP, dev);

        return ret;
}
EXPORT_SYMBOL(dev_open);
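
/*
 * Example (hypothetical in-kernel caller, illustrative only): dev_open()
 * must run under RTNL; in-kernel callers may pass a NULL extack.
 *
 *      rtnl_lock();
 *      err = dev_open(dev, NULL);
 *      rtnl_unlock();
 */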
static void __dev_close_many(struct list_head *head)
{
        struct net_device *dev;

        ASSERT_RTNL();
        might_sleep();

        list_for_each_entry(dev, head, close_list) {
                /* Temporarily disable netpoll until the interface is down */
                netpoll_poll_disable(dev);

                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

                clear_bit(__LINK_STATE_START, &dev->state);

                /* Synchronize to scheduled poll. We cannot touch poll list, it
                 * can be even on different cpu. So just clear netif_running().
                 *
                 * dev->stop() will invoke napi_disable() on all of it's
                 * napi_struct instances on this device.
                 */
                smp_mb__after_atomic(); /* Commit netif_running(). */
        }

        dev_deactivate_many(head);

        list_for_each_entry(dev, head, close_list) {
                const struct net_device_ops *ops = dev->netdev_ops;

                /*
                 *      Call the device specific close. This cannot fail.
                 *      Only if device is UP
                 *
                 *      We allow it to be called even after a DETACH hot-plug
                 *      event.
                 */
                if (ops->ndo_stop)
                        ops->ndo_stop(dev);

                dev->flags &= ~IFF_UP;
                netpoll_poll_enable(dev);
        }
}

static void __dev_close(struct net_device *dev)
{
        LIST_HEAD(single);

        list_add(&dev->close_list, &single);
        __dev_close_many(&single);
        list_del(&single);
}
void dev_close_many(struct list_head *head, bool unlink)
{
        struct net_device *dev, *tmp;

        /* Remove the devices that don't need to be closed */
        list_for_each_entry_safe(dev, tmp, head, close_list)
                if (!(dev->flags & IFF_UP))
                        list_del_init(&dev->close_list);

        __dev_close_many(head);

        list_for_each_entry_safe(dev, tmp, head, close_list) {
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
                call_netdevice_notifiers(NETDEV_DOWN, dev);
                if (unlink)
                        list_del_init(&dev->close_list);
        }
}
EXPORT_SYMBOL(dev_close_many);

/**
 *      dev_close - shutdown an interface.
 *      @dev: device to shutdown
 *
 *      This function moves an active device into down state. A
 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *      chain.
 */
void dev_close(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                LIST_HEAD(single);

                list_add(&dev->close_list, &single);
                dev_close_many(&single, true);
                list_del(&single);
        }
}
EXPORT_SYMBOL(dev_close);
/**
 *      dev_disable_lro - disable Large Receive Offload on a device
 *      @dev: device
 *
 *      Disable Large Receive Offload (LRO) on a net device.  Must be
 *      called under RTNL.  This is needed if received packets may be
 *      forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        dev->wanted_features &= ~NETIF_F_LRO;
        netdev_update_features(dev);

        if (unlikely(dev->features & NETIF_F_LRO))
                netdev_WARN(dev, "failed to disable LRO!\n");

        netdev_for_each_lower_dev(dev, lower_dev, iter)
                dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *      dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *      @dev: device
 *
 *      Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *      called under RTNL.  This is needed if Generic XDP is installed on
 *      the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
        dev->wanted_features &= ~NETIF_F_GRO_HW;
        netdev_update_features(dev);

        if (unlikely(dev->features & NETIF_F_GRO_HW))
                netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)                                          \
        case NETDEV_##val:                              \
                return "NETDEV_" __stringify(val);
        switch (cmd) {
        N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
        N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
        N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
        N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
        N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
        N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
        N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
        N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
        N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
        }
#undef N
        return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
                                   struct net_device *dev)
{
        struct netdev_notifier_info info = {
                .dev = dev,
        };

        return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net_device *last;
        struct net *net;
        int err;

        /* Close race with setup_net() and cleanup_net() */
        down_write(&pernet_ops_rwsem);
        rtnl_lock();
        err = raw_notifier_chain_register(&netdev_chain, nb);
        if (err)
                goto unlock;
        if (dev_boot_phase)
                goto unlock;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
                        err = notifier_to_errno(err);
                        if (err)
                                goto rollback;

                        if (!(dev->flags & IFF_UP))
                                continue;

                        call_netdevice_notifier(nb, NETDEV_UP, dev);
                }
        }

unlock:
        rtnl_unlock();
        up_write(&pernet_ops_rwsem);
        return err;

rollback:
        last = dev;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
                                goto outroll;

                        if (dev->flags & IFF_UP) {
                                call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
                                                        dev);
                                call_netdevice_notifier(nb, NETDEV_DOWN, dev);
                        }
                        call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
                }
        }

outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
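
/*
 * Example (hypothetical sketch, names are illustrative): a subsystem
 * reacting to interfaces coming up would register a notifier like this.
 *
 *      static int my_netdev_event(struct notifier_block *nb,
 *                                 unsigned long event, void *ptr)
 *      {
 *              struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *              if (event == NETDEV_UP)
 *                      pr_info("%s is up\n", dev->name);
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_netdev_event,
 *      };
 *
 *      register_netdevice_notifier(&my_nb);
 */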
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net *net;
        int err;

        /* Close race with setup_net() and cleanup_net() */
        down_write(&pernet_ops_rwsem);
        rtnl_lock();
        err = raw_notifier_chain_unregister(&netdev_chain, nb);
        if (err)
                goto unlock;

        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev->flags & IFF_UP) {
                                call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
                                                        dev);
                                call_netdevice_notifier(nb, NETDEV_DOWN, dev);
                        }
                        call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
                }
        }
unlock:
        rtnl_unlock();
        up_write(&pernet_ops_rwsem);
        return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *      call_netdevice_notifiers_info - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @info: notifier information data
 *
 *      Call all network notifier blocks.  Parameters and return value
 *      are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
                                         struct netdev_notifier_info *info)
{
        ASSERT_RTNL();
        return raw_notifier_call_chain(&netdev_chain, val, info);
}

static int call_netdevice_notifiers_extack(unsigned long val,
                                           struct net_device *dev,
                                           struct netlink_ext_ack *extack)
{
        struct netdev_notifier_info info = {
                .dev = dev,
                .extack = extack,
        };

        return call_netdevice_notifiers_info(val, &info);
}

/**
 *      call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *      Call all network notifier blocks.  Parameters and return value
 *      are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
        return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
/**
 *      call_netdevice_notifiers_mtu - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *      @arg: additional u32 argument passed to the notifier function
 *
 *      Call all network notifier blocks.  Parameters and return value
 *      are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
                                        struct net_device *dev, u32 arg)
{
        struct netdev_notifier_info_ext info = {
                .info.dev = dev,
                .ext.mtu = arg,
        };

        BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

        return call_netdevice_notifiers_info(val, &info.info);
}
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
        static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
        static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
        static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
        static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
        int wanted;

        wanted = atomic_add_return(deferred, &netstamp_wanted);
        if (wanted > 0)
                static_branch_enable(&netstamp_needed_key);
        else
                static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
        int wanted;

        while (1) {
                wanted = atomic_read(&netstamp_wanted);
                if (wanted <= 0)
                        break;
                if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
                        return;
        }
        atomic_inc(&netstamp_needed_deferred);
        schedule_work(&netstamp_work);
#else
        static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
        int wanted;

        while (1) {
                wanted = atomic_read(&netstamp_wanted);
                if (wanted <= 1)
                        break;
                if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
                        return;
        }
        atomic_dec(&netstamp_needed_deferred);
        schedule_work(&netstamp_work);
#else
        static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
        skb->tstamp = 0;
        if (static_branch_unlikely(&netstamp_needed_key))
                __net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)                          \
        if (static_branch_unlikely(&netstamp_needed_key)) {     \
                if ((COND) && !(SKB)->tstamp)                   \
                        __net_timestamp(SKB);                   \
        }                                                       \
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
        unsigned int len;

        if (!(dev->flags & IFF_UP))
                return false;

        len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
        if (skb->len <= len)
                return true;

        /* if TSO is enabled, we don't care about the length as the packet
         * could be forwarded without being segmented before
         */
        if (skb_is_gso(skb))
                return true;

        return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
        int ret = ____dev_forward_skb(dev, skb);

        if (likely(!ret)) {
                skb->protocol = eth_type_trans(skb, dev);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *      NET_RX_SUCCESS  (no congestion)
 *      NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
        return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
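
/*
 * Example (hypothetical driver sketch, illustrative only; my_get_peer()
 * is an invented helper): a pair-style virtual device can hand its
 * transmitted skbs straight to its peer's receive path.
 *
 *      static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *      {
 *              struct net_device *peer = my_get_peer(dev);
 *
 *              dev_forward_skb(peer, skb);     // frees skb on NET_RX_DROP
 *              return NETDEV_TX_OK;
 *      }
 */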
static inline int deliver_skb(struct sk_buff *skb,
                              struct packet_type *pt_prev,
                              struct net_device *orig_dev)
{
        if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
                return -ENOMEM;
        refcount_inc(&skb->users);
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
                                          struct packet_type **pt,
                                          struct net_device *orig_dev,
                                          __be16 type,
                                          struct list_head *ptype_list)
{
        struct packet_type *ptype, *pt_prev = *pt;

        list_for_each_entry_rcu(ptype, ptype_list, list) {
                if (ptype->type != type)
                        continue;
                if (pt_prev)
                        deliver_skb(skb, pt_prev, orig_dev);
                pt_prev = ptype;
        }
        *pt = pt_prev;
}
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
        if (!ptype->af_packet_priv || !skb->sk)
                return false;

        if (ptype->id_match)
                return ptype->id_match(ptype, skb->sk);
        else if ((struct sock *)ptype->af_packet_priv == skb->sk)
                return true;

        return false;
}
/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{
        return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
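
/*
 * Typical use on the transmit path in this file: clone to the taps only
 * when at least one is registered.
 *
 *      if (dev_nit_active(dev))
 *              dev_queue_xmit_nit(skb, dev);
 */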
/*
 *      Support routine. Sends outgoing frames to any network
 *      taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
        struct packet_type *ptype;
        struct sk_buff *skb2 = NULL;
        struct packet_type *pt_prev = NULL;
        struct list_head *ptype_list = &ptype_all;

        rcu_read_lock();
again:
        list_for_each_entry_rcu(ptype, ptype_list, list) {
                if (ptype->ignore_outgoing)
                        continue;

                /* Never send packets back to the socket
                 * they originated from - MvS (miquels@drinkel.ow.org)
                 */
                if (skb_loop_sk(ptype, skb))
                        continue;

                if (pt_prev) {
                        deliver_skb(skb2, pt_prev, skb->dev);
                        pt_prev = ptype;
                        continue;
                }

                /* need to clone skb, done only once */
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (!skb2)
                        goto out_unlock;

                net_timestamp_set(skb2);

                /* skb->nh should be correctly
                 * set by sender, so that the second statement is
                 * just protection against buggy protocols.
                 */
                skb_reset_mac_header(skb2);

                if (skb_network_header(skb2) < skb2->data ||
                    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
                        net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
                                             ntohs(skb2->protocol),
                                             dev->name);
                        skb_reset_network_header(skb2);
                }

                skb2->transport_header = skb2->network_header;
                skb2->pkt_type = PACKET_OUTGOING;
                pt_prev = ptype;
        }

        if (ptype_list == &ptype_all) {
                ptype_list = &dev->ptype_all;
                goto again;
        }
out_unlock:
        if (pt_prev) {
                if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
                        pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
                else
                        kfree_skb(skb2);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
        int i;
        struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

        /* If TC0 is invalidated disable TC mapping */
        if (tc->offset + tc->count > txq) {
                pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
                dev->num_tc = 0;
                return;
        }

        /* Invalidated prio to tc mappings set to TC0 */
        for (i = 1; i < TC_BITMASK + 1; i++) {
                int q = netdev_get_prio_tc_map(dev, i);

                tc = &dev->tc_to_txq[q];
                if (tc->offset + tc->count > txq) {
                        pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
                                i, q);
                        netdev_set_prio_tc_map(dev, i, 0);
                }
        }
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
        if (dev->num_tc) {
                struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
                int i;

                /* walk through the TCs and see if it falls into any of them */
                for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
                        if ((txq - tc->offset) < tc->count)
                                return i;
                }

                /* didn't find it, just return -1 to indicate no match */
                return -1;
        }

        return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
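
/*
 * Worked example (illustrative): with two traffic classes covering eight
 * queues as tc0 = {offset 0, count 4} and tc1 = {offset 4, count 4},
 * netdev_txq_to_tc(dev, 5) computes 5 - 4 = 1 < 4 at tc1 and returns 1.
 */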
#ifdef CONFIG_XPS
struct static_key xps_needed __read_mostly;
EXPORT_SYMBOL(xps_needed);
struct static_key xps_rxqs_needed __read_mostly;
EXPORT_SYMBOL(xps_rxqs_needed);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)             \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
                             int tci, u16 index)
{
        struct xps_map *map = NULL;
        int pos;

        if (dev_maps)
                map = xmap_dereference(dev_maps->attr_map[tci]);
        if (!map)
                return false;

        for (pos = map->len; pos--;) {
                if (map->queues[pos] != index)
                        continue;

                if (map->len > 1) {
                        map->queues[pos] = map->queues[--map->len];
                        break;
                }

                RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
                kfree_rcu(map, rcu);
                return false;
        }

        return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
                                 struct xps_dev_maps *dev_maps,
                                 int cpu, u16 offset, u16 count)
{
        int num_tc = dev->num_tc ? : 1;
        bool active = false;
        int tci;

        for (tci = cpu * num_tc; num_tc--; tci++) {
                int i, j;

                for (i = count, j = offset; i--; j++) {
                        if (!remove_xps_queue(dev_maps, tci, j))
                                break;
                }

                active |= i < 0;
        }

        return active;
}
static void reset_xps_maps(struct net_device *dev,
                           struct xps_dev_maps *dev_maps,
                           bool is_rxqs_map)
{
        if (is_rxqs_map) {
                static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
                RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
        } else {
                RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
        }
        static_key_slow_dec_cpuslocked(&xps_needed);
        kfree_rcu(dev_maps, rcu);
}
static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
                           struct xps_dev_maps *dev_maps, unsigned int nr_ids,
                           u16 offset, u16 count, bool is_rxqs_map)
{
        bool active = false;
        int i, j;

        for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
             j < nr_ids;)
                active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
                                               count);
        if (!active)
                reset_xps_maps(dev, dev_maps, is_rxqs_map);

        if (!is_rxqs_map) {
                for (i = offset + (count - 1); count--; i--) {
                        netdev_queue_numa_node_write(
                                netdev_get_tx_queue(dev, i),
                                NUMA_NO_NODE);
                }
        }
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
                                   u16 count)
{
        const unsigned long *possible_mask = NULL;
        struct xps_dev_maps *dev_maps;
        unsigned int nr_ids;

        if (!static_key_false(&xps_needed))
                return;

        cpus_read_lock();
        mutex_lock(&xps_map_mutex);

        if (static_key_false(&xps_rxqs_needed)) {
                dev_maps = xmap_dereference(dev->xps_rxqs_map);
                if (dev_maps) {
                        nr_ids = dev->num_rx_queues;
                        clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
                                       offset, count, true);
                }
        }

        dev_maps = xmap_dereference(dev->xps_cpus_map);
        if (!dev_maps)
                goto out_no_maps;

        if (num_possible_cpus() > 1)
                possible_mask = cpumask_bits(cpu_possible_mask);
        nr_ids = nr_cpu_ids;
        clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
                       false);

out_no_maps:
        mutex_unlock(&xps_map_mutex);
        cpus_read_unlock();
}
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
        netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
                                      u16 index, bool is_rxqs_map)
{
        struct xps_map *new_map;
        int alloc_len = XPS_MIN_MAP_ALLOC;
        int i, pos;

        for (pos = 0; map && pos < map->len; pos++) {
                if (map->queues[pos] != index)
                        continue;
                return map;
        }

        /* Need to add tx-queue to this CPU's/rx-queue's existing map */
        if (map) {
                if (pos < map->alloc_len)
                        return map;

                alloc_len = map->alloc_len * 2;
        }

        /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
         *  map
         */
        if (is_rxqs_map)
                new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
        else
                new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
                                       cpu_to_node(attr_index));
        if (!new_map)
                return NULL;

        for (i = 0; i < pos; i++)
                new_map->queues[i] = map->queues[i];
        new_map->alloc_len = alloc_len;
        new_map->len = pos;

        return new_map;
}
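
/*
 * Example (illustrative): __netif_set_xps_queue() is normally reached from
 * the per-queue sysfs knobs, e.g. pinning tx-0 of eth0 to CPUs 0-3 with
 *
 *      echo f > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * which ends up here with a cpumask of 0xf, index 0 and is_rxqs_map false.
 */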
/* Must be called under cpus_read_lock */
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
                          u16 index, bool is_rxqs_map)
{
        const unsigned long *online_mask = NULL, *possible_mask = NULL;
        struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
        int i, j, tci, numa_node_id = -2;
        int maps_sz, num_tc = 1, tc = 0;
        struct xps_map *map, *new_map;
        bool active = false;
        unsigned int nr_ids;

        if (dev->num_tc) {
                /* Do not allow XPS on subordinate device directly */
                num_tc = dev->num_tc;
                if (num_tc < 0)
                        return -EINVAL;

                /* If queue belongs to subordinate dev use its map */
                dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

                tc = netdev_txq_to_tc(dev, index);
                if (tc < 0)
                        return -EINVAL;
        }

        mutex_lock(&xps_map_mutex);
        if (is_rxqs_map) {
                maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
                dev_maps = xmap_dereference(dev->xps_rxqs_map);
                nr_ids = dev->num_rx_queues;
        } else {
                maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
                if (num_possible_cpus() > 1) {
                        online_mask = cpumask_bits(cpu_online_mask);
                        possible_mask = cpumask_bits(cpu_possible_mask);
                }
                dev_maps = xmap_dereference(dev->xps_cpus_map);
                nr_ids = nr_cpu_ids;
        }

        if (maps_sz < L1_CACHE_BYTES)
                maps_sz = L1_CACHE_BYTES;

        /* allocate memory for queue storage */
        for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
             j < nr_ids;) {
                if (!new_dev_maps)
                        new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
                if (!new_dev_maps) {
                        mutex_unlock(&xps_map_mutex);
                        return -ENOMEM;
                }

                tci = j * num_tc + tc;
                map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
                                 NULL;

                map = expand_xps_map(map, j, index, is_rxqs_map);
                if (!map)
                        goto error;

                RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
        }

        if (!new_dev_maps)
                goto out_no_new_maps;

        if (!dev_maps) {
                /* Increment static keys at most once per type */
                static_key_slow_inc_cpuslocked(&xps_needed);
                if (is_rxqs_map)
                        static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
        }

        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
                /* copy maps belonging to foreign traffic classes */
                for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
                        /* fill in the new device map from the old device map */
                        map = xmap_dereference(dev_maps->attr_map[tci]);
                        RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
                }

                /* We need to explicitly update tci as prevous loop
                 * could break out early if dev_maps is NULL.
                 */
                tci = j * num_tc + tc;

                if (netif_attr_test_mask(j, mask, nr_ids) &&
                    netif_attr_test_online(j, online_mask, nr_ids)) {
                        /* add tx-queue to CPU/rx-queue maps */
                        int pos = 0;

                        map = xmap_dereference(new_dev_maps->attr_map[tci]);
                        while ((pos < map->len) && (map->queues[pos] != index))
                                pos++;

                        if (pos == map->len)
                                map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
                        if (!is_rxqs_map) {
                                if (numa_node_id == -2)
                                        numa_node_id = cpu_to_node(j);
                                else if (numa_node_id != cpu_to_node(j))
                                        numa_node_id = -1;
                        }
#endif
                } else if (dev_maps) {
                        /* fill in the new device map from the old device map */
                        map = xmap_dereference(dev_maps->attr_map[tci]);
                        RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
                }

                /* copy maps belonging to foreign traffic classes */
                for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
                        /* fill in the new device map from the old device map */
                        map = xmap_dereference(dev_maps->attr_map[tci]);
                        RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
                }
        }

        if (is_rxqs_map)
                rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
        else
                rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);

        /* Cleanup old maps */
        if (!dev_maps)
                goto out_no_old_maps;

        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
                for (i = num_tc, tci = j * num_tc; i--; tci++) {
                        new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
                        map = xmap_dereference(dev_maps->attr_map[tci]);
                        if (map && map != new_map)
                                kfree_rcu(map, rcu);
                }
        }

        kfree_rcu(dev_maps, rcu);

out_no_old_maps:
        dev_maps = new_dev_maps;
        active = true;

out_no_new_maps:
        if (!is_rxqs_map) {
                /* update Tx queue numa node */
                netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
                                             (numa_node_id >= 0) ?
                                             numa_node_id : NUMA_NO_NODE);
        }

        if (!dev_maps)
                goto out_no_maps;

        /* removes tx-queue from unused CPUs/rx-queues */
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
                for (i = tc, tci = j * num_tc; i--; tci++)
                        active |= remove_xps_queue(dev_maps, tci, index);
                if (!netif_attr_test_mask(j, mask, nr_ids) ||
                    !netif_attr_test_online(j, online_mask, nr_ids))
                        active |= remove_xps_queue(dev_maps, tci, index);
                for (i = num_tc - tc, tci++; --i; tci++)
                        active |= remove_xps_queue(dev_maps, tci, index);
        }

        /* free map if not active */
        if (!active)
                reset_xps_maps(dev, dev_maps, is_rxqs_map);

out_no_maps:
        mutex_unlock(&xps_map_mutex);

        return 0;
error:
        /* remove any maps that we added */
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
                for (i = num_tc, tci = j * num_tc; i--; tci++) {
                        new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
[tci
]);
2420 xmap_dereference(dev_maps
->attr_map
[tci
]) :
2422 if (new_map
&& new_map
!= map
)
2427 mutex_unlock(&xps_map_mutex
);
2429 kfree(new_dev_maps
);
2432 EXPORT_SYMBOL_GPL(__netif_set_xps_queue
);
2434 int netif_set_xps_queue(struct net_device
*dev
, const struct cpumask
*mask
,
2440 ret
= __netif_set_xps_queue(dev
, cpumask_bits(mask
), index
, false);
2445 EXPORT_SYMBOL(netif_set_xps_queue
);
2448 static void netdev_unbind_all_sb_channels(struct net_device
*dev
)
2450 struct netdev_queue
*txq
= &dev
->_tx
[dev
->num_tx_queues
];
2452 /* Unbind any subordinate channels */
2453 while (txq
-- != &dev
->_tx
[0]) {
2455 netdev_unbind_sb_channel(dev
, txq
->sb_dev
);
2459 void netdev_reset_tc(struct net_device
*dev
)
2462 netif_reset_xps_queues_gt(dev
, 0);
2464 netdev_unbind_all_sb_channels(dev
);
2466 /* Reset TC configuration of device */
2468 memset(dev
->tc_to_txq
, 0, sizeof(dev
->tc_to_txq
));
2469 memset(dev
->prio_tc_map
, 0, sizeof(dev
->prio_tc_map
));
2471 EXPORT_SYMBOL(netdev_reset_tc
);
2473 int netdev_set_tc_queue(struct net_device
*dev
, u8 tc
, u16 count
, u16 offset
)
2475 if (tc
>= dev
->num_tc
)
2479 netif_reset_xps_queues(dev
, offset
, count
);
2481 dev
->tc_to_txq
[tc
].count
= count
;
2482 dev
->tc_to_txq
[tc
].offset
= offset
;
2485 EXPORT_SYMBOL(netdev_set_tc_queue
);
2487 int netdev_set_num_tc(struct net_device
*dev
, u8 num_tc
)
2489 if (num_tc
> TC_MAX_QUEUE
)
2493 netif_reset_xps_queues_gt(dev
, 0);
2495 netdev_unbind_all_sb_channels(dev
);
2497 dev
->num_tc
= num_tc
;
2500 EXPORT_SYMBOL(netdev_set_num_tc
);
2502 void netdev_unbind_sb_channel(struct net_device
*dev
,
2503 struct net_device
*sb_dev
)
2505 struct netdev_queue
*txq
= &dev
->_tx
[dev
->num_tx_queues
];
2508 netif_reset_xps_queues_gt(sb_dev
, 0);
2510 memset(sb_dev
->tc_to_txq
, 0, sizeof(sb_dev
->tc_to_txq
));
2511 memset(sb_dev
->prio_tc_map
, 0, sizeof(sb_dev
->prio_tc_map
));
2513 while (txq
-- != &dev
->_tx
[0]) {
2514 if (txq
->sb_dev
== sb_dev
)
2518 EXPORT_SYMBOL(netdev_unbind_sb_channel
);
2520 int netdev_bind_sb_channel_queue(struct net_device
*dev
,
2521 struct net_device
*sb_dev
,
2522 u8 tc
, u16 count
, u16 offset
)
2524 /* Make certain the sb_dev and dev are already configured */
2525 if (sb_dev
->num_tc
>= 0 || tc
>= dev
->num_tc
)
2528 /* We cannot hand out queues we don't have */
2529 if ((offset
+ count
) > dev
->real_num_tx_queues
)
2532 /* Record the mapping */
2533 sb_dev
->tc_to_txq
[tc
].count
= count
;
2534 sb_dev
->tc_to_txq
[tc
].offset
= offset
;
2536 /* Provide a way for Tx queue to find the tc_to_txq map or
2537 * XPS map for itself.
2540 netdev_get_tx_queue(dev
, count
+ offset
)->sb_dev
= sb_dev
;
2544 EXPORT_SYMBOL(netdev_bind_sb_channel_queue
);
2546 int netdev_set_sb_channel(struct net_device
*dev
, u16 channel
)
2548 /* Do not use a multiqueue device to represent a subordinate channel */
2549 if (netif_is_multiqueue(dev
))
2552 /* We allow channels 1 - 32767 to be used for subordinate channels.
2553 * Channel 0 is meant to be "native" mode and used only to represent
2554 * the main root device. We allow writing 0 to reset the device back
2555 * to normal mode after being used as a subordinate channel.
2557 if (channel
> S16_MAX
)
2560 dev
->num_tc
= -channel
;
2564 EXPORT_SYMBOL(netdev_set_sb_channel
);
2567 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2568 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
2570 int netif_set_real_num_tx_queues(struct net_device
*dev
, unsigned int txq
)
2575 disabling
= txq
< dev
->real_num_tx_queues
;
2577 if (txq
< 1 || txq
> dev
->num_tx_queues
)
2580 if (dev
->reg_state
== NETREG_REGISTERED
||
2581 dev
->reg_state
== NETREG_UNREGISTERING
) {
2584 rc
= netdev_queue_update_kobjects(dev
, dev
->real_num_tx_queues
,
2590 netif_setup_tc(dev
, txq
);
2592 dev_qdisc_change_real_num_tx(dev
, txq
);
2594 dev
->real_num_tx_queues
= txq
;
2598 qdisc_reset_all_tx_gt(dev
, txq
);
2600 netif_reset_xps_queues_gt(dev
, txq
);
2604 dev
->real_num_tx_queues
= txq
;
2609 EXPORT_SYMBOL(netif_set_real_num_tx_queues
);
2613 * netif_set_real_num_rx_queues - set actual number of RX queues used
2614 * @dev: Network device
2615 * @rxq: Actual number of RX queues
2617 * This must be called either with the rtnl_lock held or before
2618 * registration of the net device. Returns 0 on success, or a
2619 * negative error code. If called before registration, it always
2622 int netif_set_real_num_rx_queues(struct net_device
*dev
, unsigned int rxq
)
2626 if (rxq
< 1 || rxq
> dev
->num_rx_queues
)
2629 if (dev
->reg_state
== NETREG_REGISTERED
) {
2632 rc
= net_rx_queue_update_kobjects(dev
, dev
->real_num_rx_queues
,
2638 dev
->real_num_rx_queues
= rxq
;
2641 EXPORT_SYMBOL(netif_set_real_num_rx_queues
);
2645 * netif_get_num_default_rss_queues - default number of RSS queues
2647 * This routine should set an upper limit on the number of RSS queues
2648 * used by default by multiqueue devices.
2650 int netif_get_num_default_rss_queues(void)
2652 return is_kdump_kernel() ?
2653 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES
, num_online_cpus());
2655 EXPORT_SYMBOL(netif_get_num_default_rss_queues
);
2657 static void __netif_reschedule(struct Qdisc
*q
)
2659 struct softnet_data
*sd
;
2660 unsigned long flags
;
2662 local_irq_save(flags
);
2663 sd
= this_cpu_ptr(&softnet_data
);
2664 q
->next_sched
= NULL
;
2665 *sd
->output_queue_tailp
= q
;
2666 sd
->output_queue_tailp
= &q
->next_sched
;
2667 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
2668 local_irq_restore(flags
);
2671 void __netif_schedule(struct Qdisc
*q
)
2673 if (!test_and_set_bit(__QDISC_STATE_SCHED
, &q
->state
))
2674 __netif_reschedule(q
);
2676 EXPORT_SYMBOL(__netif_schedule
);
2678 struct dev_kfree_skb_cb
{
2679 enum skb_free_reason reason
;
2682 static struct dev_kfree_skb_cb
*get_kfree_skb_cb(const struct sk_buff
*skb
)
2684 return (struct dev_kfree_skb_cb
*)skb
->cb
;
2687 void netif_schedule_queue(struct netdev_queue
*txq
)
2690 if (!(txq
->state
& QUEUE_STATE_ANY_XOFF
)) {
2691 struct Qdisc
*q
= rcu_dereference(txq
->qdisc
);
2693 __netif_schedule(q
);
2697 EXPORT_SYMBOL(netif_schedule_queue
);
2699 void netif_tx_wake_queue(struct netdev_queue
*dev_queue
)
2701 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF
, &dev_queue
->state
)) {
2705 q
= rcu_dereference(dev_queue
->qdisc
);
2706 __netif_schedule(q
);
2710 EXPORT_SYMBOL(netif_tx_wake_queue
);
2712 void __dev_kfree_skb_irq(struct sk_buff
*skb
, enum skb_free_reason reason
)
2714 unsigned long flags
;
2719 if (likely(refcount_read(&skb
->users
) == 1)) {
2721 refcount_set(&skb
->users
, 0);
2722 } else if (likely(!refcount_dec_and_test(&skb
->users
))) {
2725 get_kfree_skb_cb(skb
)->reason
= reason
;
2726 local_irq_save(flags
);
2727 skb
->next
= __this_cpu_read(softnet_data
.completion_queue
);
2728 __this_cpu_write(softnet_data
.completion_queue
, skb
);
2729 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
2730 local_irq_restore(flags
);
2732 EXPORT_SYMBOL(__dev_kfree_skb_irq
);
2734 void __dev_kfree_skb_any(struct sk_buff
*skb
, enum skb_free_reason reason
)
2736 if (in_irq() || irqs_disabled())
2737 __dev_kfree_skb_irq(skb
, reason
);
2741 EXPORT_SYMBOL(__dev_kfree_skb_any
);
2745 * netif_device_detach - mark device as removed
2746 * @dev: network device
2748 * Mark device as removed from system and therefore no longer available.
2750 void netif_device_detach(struct net_device
*dev
)
2752 if (test_and_clear_bit(__LINK_STATE_PRESENT
, &dev
->state
) &&
2753 netif_running(dev
)) {
2754 netif_tx_stop_all_queues(dev
);
2757 EXPORT_SYMBOL(netif_device_detach
);
2760 * netif_device_attach - mark device as attached
2761 * @dev: network device
2763 * Mark device as attached from system and restart if needed.
2765 void netif_device_attach(struct net_device
*dev
)
2767 if (!test_and_set_bit(__LINK_STATE_PRESENT
, &dev
->state
) &&
2768 netif_running(dev
)) {
2769 netif_tx_wake_all_queues(dev
);
2770 __netdev_watchdog_up(dev
);
2773 EXPORT_SYMBOL(netif_device_attach
);
2776 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
2777 * to be used as a distribution range.
2779 static u16
skb_tx_hash(const struct net_device
*dev
,
2780 const struct net_device
*sb_dev
,
2781 struct sk_buff
*skb
)
2785 u16 qcount
= dev
->real_num_tx_queues
;
2788 u8 tc
= netdev_get_prio_tc_map(dev
, skb
->priority
);
2790 qoffset
= sb_dev
->tc_to_txq
[tc
].offset
;
2791 qcount
= sb_dev
->tc_to_txq
[tc
].count
;
2792 if (unlikely(!qcount
)) {
2793 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
2794 sb_dev
->name
, qoffset
, tc
);
2796 qcount
= dev
->real_num_tx_queues
;
2800 if (skb_rx_queue_recorded(skb
)) {
2801 hash
= skb_get_rx_queue(skb
);
2802 if (hash
>= qoffset
)
2804 while (unlikely(hash
>= qcount
))
2806 return hash
+ qoffset
;
2809 return (u16
) reciprocal_scale(skb_get_hash(skb
), qcount
) + qoffset
;
2812 static void skb_warn_bad_offload(const struct sk_buff
*skb
)
2814 static const netdev_features_t null_features
;
2815 struct net_device
*dev
= skb
->dev
;
2816 const char *name
= "";
2818 if (!net_ratelimit())
2822 if (dev
->dev
.parent
)
2823 name
= dev_driver_string(dev
->dev
.parent
);
2825 name
= netdev_name(dev
);
2827 skb_dump(KERN_WARNING
, skb
, false);
2828 WARN(1, "%s: caps=(%pNF, %pNF)\n",
2829 name
, dev
? &dev
->features
: &null_features
,
2830 skb
->sk
? &skb
->sk
->sk_route_caps
: &null_features
);
2834 * Invalidate hardware checksum when packet is to be mangled, and
2835 * complete checksum manually on outgoing path.
2837 int skb_checksum_help(struct sk_buff
*skb
)
2840 int ret
= 0, offset
;
2842 if (skb
->ip_summed
== CHECKSUM_COMPLETE
)
2843 goto out_set_summed
;
2845 if (unlikely(skb_shinfo(skb
)->gso_size
)) {
2846 skb_warn_bad_offload(skb
);
2850 /* Before computing a checksum, we should make sure no frag could
2851 * be modified by an external entity : checksum could be wrong.
2853 if (skb_has_shared_frag(skb
)) {
2854 ret
= __skb_linearize(skb
);
2859 offset
= skb_checksum_start_offset(skb
);
2860 BUG_ON(offset
>= skb_headlen(skb
));
2861 csum
= skb_checksum(skb
, offset
, skb
->len
- offset
, 0);
2863 offset
+= skb
->csum_offset
;
2864 BUG_ON(offset
+ sizeof(__sum16
) > skb_headlen(skb
));
2866 if (skb_cloned(skb
) &&
2867 !skb_clone_writable(skb
, offset
+ sizeof(__sum16
))) {
2868 ret
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2873 *(__sum16
*)(skb
->data
+ offset
) = csum_fold(csum
) ?: CSUM_MANGLED_0
;
2875 skb
->ip_summed
= CHECKSUM_NONE
;
2879 EXPORT_SYMBOL(skb_checksum_help
);
2881 int skb_crc32c_csum_help(struct sk_buff
*skb
)
2884 int ret
= 0, offset
, start
;
2886 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
2889 if (unlikely(skb_is_gso(skb
)))
2892 /* Before computing a checksum, we should make sure no frag could
2893 * be modified by an external entity : checksum could be wrong.
2895 if (unlikely(skb_has_shared_frag(skb
))) {
2896 ret
= __skb_linearize(skb
);
2900 start
= skb_checksum_start_offset(skb
);
2901 offset
= start
+ offsetof(struct sctphdr
, checksum
);
2902 if (WARN_ON_ONCE(offset
>= skb_headlen(skb
))) {
2906 if (skb_cloned(skb
) &&
2907 !skb_clone_writable(skb
, offset
+ sizeof(__le32
))) {
2908 ret
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2912 crc32c_csum
= cpu_to_le32(~__skb_checksum(skb
, start
,
2913 skb
->len
- start
, ~(__u32
)0,
2915 *(__le32
*)(skb
->data
+ offset
) = crc32c_csum
;
2916 skb
->ip_summed
= CHECKSUM_NONE
;
2917 skb
->csum_not_inet
= 0;
2922 __be16
skb_network_protocol(struct sk_buff
*skb
, int *depth
)
2924 __be16 type
= skb
->protocol
;
2926 /* Tunnel gso handlers can set protocol to ethernet. */
2927 if (type
== htons(ETH_P_TEB
)) {
2930 if (unlikely(!pskb_may_pull(skb
, sizeof(struct ethhdr
))))
2933 eth
= (struct ethhdr
*)skb
->data
;
2934 type
= eth
->h_proto
;
2937 return __vlan_get_protocol(skb
, type
, depth
);
2941 * skb_mac_gso_segment - mac layer segmentation handler.
2942 * @skb: buffer to segment
2943 * @features: features for the output path (see dev->features)
2945 struct sk_buff
*skb_mac_gso_segment(struct sk_buff
*skb
,
2946 netdev_features_t features
)
2948 struct sk_buff
*segs
= ERR_PTR(-EPROTONOSUPPORT
);
2949 struct packet_offload
*ptype
;
2950 int vlan_depth
= skb
->mac_len
;
2951 __be16 type
= skb_network_protocol(skb
, &vlan_depth
);
2953 if (unlikely(!type
))
2954 return ERR_PTR(-EINVAL
);
2956 __skb_pull(skb
, vlan_depth
);
2959 list_for_each_entry_rcu(ptype
, &offload_base
, list
) {
2960 if (ptype
->type
== type
&& ptype
->callbacks
.gso_segment
) {
2961 segs
= ptype
->callbacks
.gso_segment(skb
, features
);
2967 __skb_push(skb
, skb
->data
- skb_mac_header(skb
));
2971 EXPORT_SYMBOL(skb_mac_gso_segment
);
2974 /* openvswitch calls this on rx path, so we need a different check.
2976 static inline bool skb_needs_check(struct sk_buff
*skb
, bool tx_path
)
2979 return skb
->ip_summed
!= CHECKSUM_PARTIAL
&&
2980 skb
->ip_summed
!= CHECKSUM_UNNECESSARY
;
2982 return skb
->ip_summed
== CHECKSUM_NONE
;
2986 * __skb_gso_segment - Perform segmentation on skb.
2987 * @skb: buffer to segment
2988 * @features: features for the output path (see dev->features)
2989 * @tx_path: whether it is called in TX path
2991 * This function segments the given skb and returns a list of segments.
2993 * It may return NULL if the skb requires no segmentation. This is
2994 * only possible when GSO is used for verifying header integrity.
2996 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
2998 struct sk_buff
*__skb_gso_segment(struct sk_buff
*skb
,
2999 netdev_features_t features
, bool tx_path
)
3001 struct sk_buff
*segs
;
3003 if (unlikely(skb_needs_check(skb
, tx_path
))) {
3006 /* We're going to init ->check field in TCP or UDP header */
3007 err
= skb_cow_head(skb
, 0);
3009 return ERR_PTR(err
);
3012 /* Only report GSO partial support if it will enable us to
3013 * support segmentation on this frame without needing additional
3016 if (features
& NETIF_F_GSO_PARTIAL
) {
3017 netdev_features_t partial_features
= NETIF_F_GSO_ROBUST
;
3018 struct net_device
*dev
= skb
->dev
;
3020 partial_features
|= dev
->features
& dev
->gso_partial_features
;
3021 if (!skb_gso_ok(skb
, features
| partial_features
))
3022 features
&= ~NETIF_F_GSO_PARTIAL
;
3025 BUILD_BUG_ON(SKB_SGO_CB_OFFSET
+
3026 sizeof(*SKB_GSO_CB(skb
)) > sizeof(skb
->cb
));
3028 SKB_GSO_CB(skb
)->mac_offset
= skb_headroom(skb
);
3029 SKB_GSO_CB(skb
)->encap_level
= 0;
3031 skb_reset_mac_header(skb
);
3032 skb_reset_mac_len(skb
);
3034 segs
= skb_mac_gso_segment(skb
, features
);
3036 if (unlikely(skb_needs_check(skb
, tx_path
) && !IS_ERR(segs
)))
3037 skb_warn_bad_offload(skb
);
3041 EXPORT_SYMBOL(__skb_gso_segment
);
3043 /* Take action when hardware reception checksum errors are detected. */
3045 void netdev_rx_csum_fault(struct net_device
*dev
, struct sk_buff
*skb
)
3047 if (net_ratelimit()) {
3048 pr_err("%s: hw csum failure\n", dev
? dev
->name
: "<unknown>");
3049 skb_dump(KERN_ERR
, skb
, true);
3053 EXPORT_SYMBOL(netdev_rx_csum_fault
);
3056 /* XXX: check that highmem exists at all on the given machine. */
3057 static int illegal_highdma(struct net_device
*dev
, struct sk_buff
*skb
)
3059 #ifdef CONFIG_HIGHMEM
3062 if (!(dev
->features
& NETIF_F_HIGHDMA
)) {
3063 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3064 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3066 if (PageHighMem(skb_frag_page(frag
)))
3074 /* If MPLS offload request, verify we are testing hardware MPLS features
3075 * instead of standard features for the netdev.
3077 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3078 static netdev_features_t
net_mpls_features(struct sk_buff
*skb
,
3079 netdev_features_t features
,
3082 if (eth_p_mpls(type
))
3083 features
&= skb
->dev
->mpls_features
;
3088 static netdev_features_t
net_mpls_features(struct sk_buff
*skb
,
3089 netdev_features_t features
,
3096 static netdev_features_t
harmonize_features(struct sk_buff
*skb
,
3097 netdev_features_t features
)
3102 type
= skb_network_protocol(skb
, &tmp
);
3103 features
= net_mpls_features(skb
, features
, type
);
3105 if (skb
->ip_summed
!= CHECKSUM_NONE
&&
3106 !can_checksum_protocol(features
, type
)) {
3107 features
&= ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
3109 if (illegal_highdma(skb
->dev
, skb
))
3110 features
&= ~NETIF_F_SG
;
3115 netdev_features_t
passthru_features_check(struct sk_buff
*skb
,
3116 struct net_device
*dev
,
3117 netdev_features_t features
)
3121 EXPORT_SYMBOL(passthru_features_check
);
3123 static netdev_features_t
dflt_features_check(struct sk_buff
*skb
,
3124 struct net_device
*dev
,
3125 netdev_features_t features
)
3127 return vlan_features_check(skb
, features
);
3130 static netdev_features_t
gso_features_check(const struct sk_buff
*skb
,
3131 struct net_device
*dev
,
3132 netdev_features_t features
)
3134 u16 gso_segs
= skb_shinfo(skb
)->gso_segs
;
3136 if (gso_segs
> dev
->gso_max_segs
)
3137 return features
& ~NETIF_F_GSO_MASK
;
3139 /* Support for GSO partial features requires software
3140 * intervention before we can actually process the packets
3141 * so we need to strip support for any partial features now
3142 * and we can pull them back in after we have partially
3143 * segmented the frame.
3145 if (!(skb_shinfo(skb
)->gso_type
& SKB_GSO_PARTIAL
))
3146 features
&= ~dev
->gso_partial_features
;
3148 /* Make sure to clear the IPv4 ID mangling feature if the
3149 * IPv4 header has the potential to be fragmented.
3151 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
) {
3152 struct iphdr
*iph
= skb
->encapsulation
?
3153 inner_ip_hdr(skb
) : ip_hdr(skb
);
3155 if (!(iph
->frag_off
& htons(IP_DF
)))
3156 features
&= ~NETIF_F_TSO_MANGLEID
;
3162 netdev_features_t
netif_skb_features(struct sk_buff
*skb
)
3164 struct net_device
*dev
= skb
->dev
;
3165 netdev_features_t features
= dev
->features
;
3167 if (skb_is_gso(skb
))
3168 features
= gso_features_check(skb
, dev
, features
);
3170 /* If encapsulation offload request, verify we are testing
3171 * hardware encapsulation features instead of standard
3172 * features for the netdev
3174 if (skb
->encapsulation
)
3175 features
&= dev
->hw_enc_features
;
3177 if (skb_vlan_tagged(skb
))
3178 features
= netdev_intersect_features(features
,
3179 dev
->vlan_features
|
3180 NETIF_F_HW_VLAN_CTAG_TX
|
3181 NETIF_F_HW_VLAN_STAG_TX
);
3183 if (dev
->netdev_ops
->ndo_features_check
)
3184 features
&= dev
->netdev_ops
->ndo_features_check(skb
, dev
,
3187 features
&= dflt_features_check(skb
, dev
, features
);
3189 return harmonize_features(skb
, features
);
3191 EXPORT_SYMBOL(netif_skb_features
);
3193 static int xmit_one(struct sk_buff
*skb
, struct net_device
*dev
,
3194 struct netdev_queue
*txq
, bool more
)
3199 if (dev_nit_active(dev
))
3200 dev_queue_xmit_nit(skb
, dev
);
3203 trace_net_dev_start_xmit(skb
, dev
);
3204 rc
= netdev_start_xmit(skb
, dev
, txq
, more
);
3205 trace_net_dev_xmit(skb
, rc
, dev
, len
);
3210 struct sk_buff
*dev_hard_start_xmit(struct sk_buff
*first
, struct net_device
*dev
,
3211 struct netdev_queue
*txq
, int *ret
)
3213 struct sk_buff
*skb
= first
;
3214 int rc
= NETDEV_TX_OK
;
3217 struct sk_buff
*next
= skb
->next
;
3219 skb_mark_not_on_list(skb
);
3220 rc
= xmit_one(skb
, dev
, txq
, next
!= NULL
);
3221 if (unlikely(!dev_xmit_complete(rc
))) {
3227 if (netif_tx_queue_stopped(txq
) && skb
) {
3228 rc
= NETDEV_TX_BUSY
;
3238 static struct sk_buff
*validate_xmit_vlan(struct sk_buff
*skb
,
3239 netdev_features_t features
)
3241 if (skb_vlan_tag_present(skb
) &&
3242 !vlan_hw_offload_capable(features
, skb
->vlan_proto
))
3243 skb
= __vlan_hwaccel_push_inside(skb
);
3247 int skb_csum_hwoffload_help(struct sk_buff
*skb
,
3248 const netdev_features_t features
)
3250 if (unlikely(skb
->csum_not_inet
))
3251 return !!(features
& NETIF_F_SCTP_CRC
) ? 0 :
3252 skb_crc32c_csum_help(skb
);
3254 return !!(features
& NETIF_F_CSUM_MASK
) ? 0 : skb_checksum_help(skb
);
3256 EXPORT_SYMBOL(skb_csum_hwoffload_help
);
3258 static struct sk_buff
*validate_xmit_skb(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3260 netdev_features_t features
;
3262 features
= netif_skb_features(skb
);
3263 skb
= validate_xmit_vlan(skb
, features
);
3267 skb
= sk_validate_xmit_skb(skb
, dev
);
3271 if (netif_needs_gso(skb
, features
)) {
3272 struct sk_buff
*segs
;
3274 segs
= skb_gso_segment(skb
, features
);
3282 if (skb_needs_linearize(skb
, features
) &&
3283 __skb_linearize(skb
))
3286 /* If packet is not checksummed and device does not
3287 * support checksumming for this protocol, complete
3288 * checksumming here.
3290 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
3291 if (skb
->encapsulation
)
3292 skb_set_inner_transport_header(skb
,
3293 skb_checksum_start_offset(skb
));
3295 skb_set_transport_header(skb
,
3296 skb_checksum_start_offset(skb
));
3297 if (skb_csum_hwoffload_help(skb
, features
))
3302 skb
= validate_xmit_xfrm(skb
, features
, again
);
3309 atomic_long_inc(&dev
->tx_dropped
);
3313 struct sk_buff
*validate_xmit_skb_list(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3315 struct sk_buff
*next
, *head
= NULL
, *tail
;
3317 for (; skb
!= NULL
; skb
= next
) {
3319 skb_mark_not_on_list(skb
);
3321 /* in case skb wont be segmented, point to itself */
3324 skb
= validate_xmit_skb(skb
, dev
, again
);
3332 /* If skb was segmented, skb->prev points to
3333 * the last segment. If not, it still contains skb.
3339 EXPORT_SYMBOL_GPL(validate_xmit_skb_list
);
3341 static void qdisc_pkt_len_init(struct sk_buff
*skb
)
3343 const struct skb_shared_info
*shinfo
= skb_shinfo(skb
);
3345 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
3347 /* To get more precise estimation of bytes sent on wire,
3348 * we add to pkt_len the headers size of all segments
3350 if (shinfo
->gso_size
&& skb_transport_header_was_set(skb
)) {
3351 unsigned int hdr_len
;
3352 u16 gso_segs
= shinfo
->gso_segs
;
3354 /* mac layer + network layer */
3355 hdr_len
= skb_transport_header(skb
) - skb_mac_header(skb
);
3357 /* + transport layer */
3358 if (likely(shinfo
->gso_type
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
))) {
3359 const struct tcphdr
*th
;
3360 struct tcphdr _tcphdr
;
3362 th
= skb_header_pointer(skb
, skb_transport_offset(skb
),
3363 sizeof(_tcphdr
), &_tcphdr
);
3365 hdr_len
+= __tcp_hdrlen(th
);
3367 struct udphdr _udphdr
;
3369 if (skb_header_pointer(skb
, skb_transport_offset(skb
),
3370 sizeof(_udphdr
), &_udphdr
))
3371 hdr_len
+= sizeof(struct udphdr
);
3374 if (shinfo
->gso_type
& SKB_GSO_DODGY
)
3375 gso_segs
= DIV_ROUND_UP(skb
->len
- hdr_len
,
3378 qdisc_skb_cb(skb
)->pkt_len
+= (gso_segs
- 1) * hdr_len
;
3382 static inline int __dev_xmit_skb(struct sk_buff
*skb
, struct Qdisc
*q
,
3383 struct net_device
*dev
,
3384 struct netdev_queue
*txq
)
3386 spinlock_t
*root_lock
= qdisc_lock(q
);
3387 struct sk_buff
*to_free
= NULL
;
3391 qdisc_calculate_pkt_len(skb
, q
);
3393 if (q
->flags
& TCQ_F_NOLOCK
) {
3394 rc
= q
->enqueue(skb
, q
, &to_free
) & NET_XMIT_MASK
;
3395 if (likely(!netif_xmit_frozen_or_stopped(txq
)))
3398 if (unlikely(to_free
))
3399 kfree_skb_list(to_free
);
3404 * Heuristic to force contended enqueues to serialize on a
3405 * separate lock before trying to get qdisc main lock.
3406 * This permits qdisc->running owner to get the lock more
3407 * often and dequeue packets faster.
3409 contended
= qdisc_is_running(q
);
3410 if (unlikely(contended
))
3411 spin_lock(&q
->busylock
);
3413 spin_lock(root_lock
);
3414 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
, &q
->state
))) {
3415 __qdisc_drop(skb
, &to_free
);
3417 } else if ((q
->flags
& TCQ_F_CAN_BYPASS
) && !qdisc_qlen(q
) &&
3418 qdisc_run_begin(q
)) {
3420 * This is a work-conserving queue; there are no old skbs
3421 * waiting to be sent out; and the qdisc is not running -
3422 * xmit the skb directly.
3425 qdisc_bstats_update(q
, skb
);
3427 if (sch_direct_xmit(skb
, q
, dev
, txq
, root_lock
, true)) {
3428 if (unlikely(contended
)) {
3429 spin_unlock(&q
->busylock
);
3436 rc
= NET_XMIT_SUCCESS
;
3438 rc
= q
->enqueue(skb
, q
, &to_free
) & NET_XMIT_MASK
;
3439 if (qdisc_run_begin(q
)) {
3440 if (unlikely(contended
)) {
3441 spin_unlock(&q
->busylock
);
3448 spin_unlock(root_lock
);
3449 if (unlikely(to_free
))
3450 kfree_skb_list(to_free
);
3451 if (unlikely(contended
))
3452 spin_unlock(&q
->busylock
);
3456 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3457 static void skb_update_prio(struct sk_buff
*skb
)
3459 const struct netprio_map
*map
;
3460 const struct sock
*sk
;
3461 unsigned int prioidx
;
3465 map
= rcu_dereference_bh(skb
->dev
->priomap
);
3468 sk
= skb_to_full_sk(skb
);
3472 prioidx
= sock_cgroup_prioidx(&sk
->sk_cgrp_data
);
3474 if (prioidx
< map
->priomap_len
)
3475 skb
->priority
= map
->priomap
[prioidx
];
3478 #define skb_update_prio(skb)
3482 * dev_loopback_xmit - loop back @skb
3483 * @net: network namespace this loopback is happening in
3484 * @sk: sk needed to be a netfilter okfn
3485 * @skb: buffer to transmit
3487 int dev_loopback_xmit(struct net
*net
, struct sock
*sk
, struct sk_buff
*skb
)
3489 skb_reset_mac_header(skb
);
3490 __skb_pull(skb
, skb_network_offset(skb
));
3491 skb
->pkt_type
= PACKET_LOOPBACK
;
3492 if (skb
->ip_summed
== CHECKSUM_NONE
)
3493 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3494 WARN_ON(!skb_dst(skb
));
3499 EXPORT_SYMBOL(dev_loopback_xmit
);
3501 #ifdef CONFIG_NET_EGRESS
3502 static struct sk_buff
*
3503 sch_handle_egress(struct sk_buff
*skb
, int *ret
, struct net_device
*dev
)
3505 struct mini_Qdisc
*miniq
= rcu_dereference_bh(dev
->miniq_egress
);
3506 struct tcf_result cl_res
;
3511 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3512 mini_qdisc_bstats_cpu_update(miniq
, skb
);
3514 switch (tcf_classify(skb
, miniq
->filter_list
, &cl_res
, false)) {
3516 case TC_ACT_RECLASSIFY
:
3517 skb
->tc_index
= TC_H_MIN(cl_res
.classid
);
3520 mini_qdisc_qstats_cpu_drop(miniq
);
3521 *ret
= NET_XMIT_DROP
;
3527 *ret
= NET_XMIT_SUCCESS
;
3530 case TC_ACT_REDIRECT
:
3531 /* No need to push/pop skb's mac_header here on egress! */
3532 skb_do_redirect(skb
);
3533 *ret
= NET_XMIT_SUCCESS
;
3541 #endif /* CONFIG_NET_EGRESS */
3544 static int __get_xps_queue_idx(struct net_device
*dev
, struct sk_buff
*skb
,
3545 struct xps_dev_maps
*dev_maps
, unsigned int tci
)
3547 struct xps_map
*map
;
3548 int queue_index
= -1;
3552 tci
+= netdev_get_prio_tc_map(dev
, skb
->priority
);
3555 map
= rcu_dereference(dev_maps
->attr_map
[tci
]);
3558 queue_index
= map
->queues
[0];
3560 queue_index
= map
->queues
[reciprocal_scale(
3561 skb_get_hash(skb
), map
->len
)];
3562 if (unlikely(queue_index
>= dev
->real_num_tx_queues
))
3569 static int get_xps_queue(struct net_device
*dev
, struct net_device
*sb_dev
,
3570 struct sk_buff
*skb
)
3573 struct xps_dev_maps
*dev_maps
;
3574 struct sock
*sk
= skb
->sk
;
3575 int queue_index
= -1;
3577 if (!static_key_false(&xps_needed
))
3581 if (!static_key_false(&xps_rxqs_needed
))
3584 dev_maps
= rcu_dereference(sb_dev
->xps_rxqs_map
);
3586 int tci
= sk_rx_queue_get(sk
);
3588 if (tci
>= 0 && tci
< dev
->num_rx_queues
)
3589 queue_index
= __get_xps_queue_idx(dev
, skb
, dev_maps
,
3594 if (queue_index
< 0) {
3595 dev_maps
= rcu_dereference(sb_dev
->xps_cpus_map
);
3597 unsigned int tci
= skb
->sender_cpu
- 1;
3599 queue_index
= __get_xps_queue_idx(dev
, skb
, dev_maps
,
3611 u16
dev_pick_tx_zero(struct net_device
*dev
, struct sk_buff
*skb
,
3612 struct net_device
*sb_dev
)
3616 EXPORT_SYMBOL(dev_pick_tx_zero
);
3618 u16
dev_pick_tx_cpu_id(struct net_device
*dev
, struct sk_buff
*skb
,
3619 struct net_device
*sb_dev
)
3621 return (u16
)raw_smp_processor_id() % dev
->real_num_tx_queues
;
3623 EXPORT_SYMBOL(dev_pick_tx_cpu_id
);
3625 u16
netdev_pick_tx(struct net_device
*dev
, struct sk_buff
*skb
,
3626 struct net_device
*sb_dev
)
3628 struct sock
*sk
= skb
->sk
;
3629 int queue_index
= sk_tx_queue_get(sk
);
3631 sb_dev
= sb_dev
? : dev
;
3633 if (queue_index
< 0 || skb
->ooo_okay
||
3634 queue_index
>= dev
->real_num_tx_queues
) {
3635 int new_index
= get_xps_queue(dev
, sb_dev
, skb
);
3638 new_index
= skb_tx_hash(dev
, sb_dev
, skb
);
3640 if (queue_index
!= new_index
&& sk
&&
3642 rcu_access_pointer(sk
->sk_dst_cache
))
3643 sk_tx_queue_set(sk
, new_index
);
3645 queue_index
= new_index
;
3650 EXPORT_SYMBOL(netdev_pick_tx
);
3652 struct netdev_queue
*netdev_core_pick_tx(struct net_device
*dev
,
3653 struct sk_buff
*skb
,
3654 struct net_device
*sb_dev
)
3656 int queue_index
= 0;
3659 u32 sender_cpu
= skb
->sender_cpu
- 1;
3661 if (sender_cpu
>= (u32
)NR_CPUS
)
3662 skb
->sender_cpu
= raw_smp_processor_id() + 1;
3665 if (dev
->real_num_tx_queues
!= 1) {
3666 const struct net_device_ops
*ops
= dev
->netdev_ops
;
3668 if (ops
->ndo_select_queue
)
3669 queue_index
= ops
->ndo_select_queue(dev
, skb
, sb_dev
);
3671 queue_index
= netdev_pick_tx(dev
, skb
, sb_dev
);
3673 queue_index
= netdev_cap_txqueue(dev
, queue_index
);
3676 skb_set_queue_mapping(skb
, queue_index
);
3677 return netdev_get_tx_queue(dev
, queue_index
);
3681 * __dev_queue_xmit - transmit a buffer
3682 * @skb: buffer to transmit
3683 * @sb_dev: suboordinate device used for L2 forwarding offload
3685 * Queue a buffer for transmission to a network device. The caller must
3686 * have set the device and priority and built the buffer before calling
3687 * this function. The function can be called from an interrupt.
3689 * A negative errno code is returned on a failure. A success does not
3690 * guarantee the frame will be transmitted as it may be dropped due
3691 * to congestion or traffic shaping.
3693 * -----------------------------------------------------------------------------------
3694 * I notice this method can also return errors from the queue disciplines,
3695 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3698 * Regardless of the return value, the skb is consumed, so it is currently
3699 * difficult to retry a send to this method. (You can bump the ref count
3700 * before sending to hold a reference for retry if you are careful.)
3702 * When calling this method, interrupts MUST be enabled. This is because
3703 * the BH enable code must have IRQs enabled so that it will not deadlock.
3706 static int __dev_queue_xmit(struct sk_buff
*skb
, struct net_device
*sb_dev
)
3708 struct net_device
*dev
= skb
->dev
;
3709 struct netdev_queue
*txq
;
3714 skb_reset_mac_header(skb
);
3716 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_SCHED_TSTAMP
))
3717 __skb_tstamp_tx(skb
, NULL
, skb
->sk
, SCM_TSTAMP_SCHED
);
3719 /* Disable soft irqs for various locks below. Also
3720 * stops preemption for RCU.
3724 skb_update_prio(skb
);
3726 qdisc_pkt_len_init(skb
);
3727 #ifdef CONFIG_NET_CLS_ACT
3728 skb
->tc_at_ingress
= 0;
3729 # ifdef CONFIG_NET_EGRESS
3730 if (static_branch_unlikely(&egress_needed_key
)) {
3731 skb
= sch_handle_egress(skb
, &rc
, dev
);
3737 /* If device/qdisc don't need skb->dst, release it right now while
3738 * its hot in this cpu cache.
3740 if (dev
->priv_flags
& IFF_XMIT_DST_RELEASE
)
3745 txq
= netdev_core_pick_tx(dev
, skb
, sb_dev
);
3746 q
= rcu_dereference_bh(txq
->qdisc
);
3748 trace_net_dev_queue(skb
);
3750 rc
= __dev_xmit_skb(skb
, q
, dev
, txq
);
3754 /* The device has no queue. Common case for software devices:
3755 * loopback, all the sorts of tunnels...
3757 * Really, it is unlikely that netif_tx_lock protection is necessary
3758 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
3760 * However, it is possible, that they rely on protection
3763 * Check this and shot the lock. It is not prone from deadlocks.
3764 *Either shot noqueue qdisc, it is even simpler 8)
3766 if (dev
->flags
& IFF_UP
) {
3767 int cpu
= smp_processor_id(); /* ok because BHs are off */
3769 /* Other cpus might concurrently change txq->xmit_lock_owner
3770 * to -1 or to their cpu id, but not to our id.
3772 if (READ_ONCE(txq
->xmit_lock_owner
) != cpu
) {
3773 if (dev_xmit_recursion())
3774 goto recursion_alert
;
3776 skb
= validate_xmit_skb(skb
, dev
, &again
);
3780 HARD_TX_LOCK(dev
, txq
, cpu
);
3782 if (!netif_xmit_stopped(txq
)) {
3783 dev_xmit_recursion_inc();
3784 skb
= dev_hard_start_xmit(skb
, dev
, txq
, &rc
);
3785 dev_xmit_recursion_dec();
3786 if (dev_xmit_complete(rc
)) {
3787 HARD_TX_UNLOCK(dev
, txq
);
3791 HARD_TX_UNLOCK(dev
, txq
);
3792 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3795 /* Recursion is detected! It is possible,
3799 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3805 rcu_read_unlock_bh();
3807 atomic_long_inc(&dev
->tx_dropped
);
3808 kfree_skb_list(skb
);
3811 rcu_read_unlock_bh();
3815 int dev_queue_xmit(struct sk_buff
*skb
)
3817 return __dev_queue_xmit(skb
, NULL
);
3819 EXPORT_SYMBOL(dev_queue_xmit
);
3821 int dev_queue_xmit_accel(struct sk_buff
*skb
, struct net_device
*sb_dev
)
3823 return __dev_queue_xmit(skb
, sb_dev
);
3825 EXPORT_SYMBOL(dev_queue_xmit_accel
);
3827 int dev_direct_xmit(struct sk_buff
*skb
, u16 queue_id
)
3829 struct net_device
*dev
= skb
->dev
;
3830 struct sk_buff
*orig_skb
= skb
;
3831 struct netdev_queue
*txq
;
3832 int ret
= NETDEV_TX_BUSY
;
3835 if (unlikely(!netif_running(dev
) ||
3836 !netif_carrier_ok(dev
)))
3839 skb
= validate_xmit_skb_list(skb
, dev
, &again
);
3840 if (skb
!= orig_skb
)
3843 skb_set_queue_mapping(skb
, queue_id
);
3844 txq
= skb_get_tx_queue(dev
, skb
);
3848 dev_xmit_recursion_inc();
3849 HARD_TX_LOCK(dev
, txq
, smp_processor_id());
3850 if (!netif_xmit_frozen_or_drv_stopped(txq
))
3851 ret
= netdev_start_xmit(skb
, dev
, txq
, false);
3852 HARD_TX_UNLOCK(dev
, txq
);
3853 dev_xmit_recursion_dec();
3857 if (!dev_xmit_complete(ret
))
3862 atomic_long_inc(&dev
->tx_dropped
);
3863 kfree_skb_list(skb
);
3864 return NET_XMIT_DROP
;
3866 EXPORT_SYMBOL(dev_direct_xmit
);
3868 /*************************************************************************
3870 *************************************************************************/
3872 int netdev_max_backlog __read_mostly
= 1000;
3873 EXPORT_SYMBOL(netdev_max_backlog
);
3875 int netdev_tstamp_prequeue __read_mostly
= 1;
3876 int netdev_budget __read_mostly
= 300;
3877 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
3878 unsigned int __read_mostly netdev_budget_usecs
= 2 * USEC_PER_SEC
/ HZ
;
3879 int weight_p __read_mostly
= 64; /* old backlog weight */
3880 int dev_weight_rx_bias __read_mostly
= 1; /* bias for backlog weight */
3881 int dev_weight_tx_bias __read_mostly
= 1; /* bias for output_queue quota */
3882 int dev_rx_weight __read_mostly
= 64;
3883 int dev_tx_weight __read_mostly
= 64;
3884 /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
3885 int gro_normal_batch __read_mostly
= 8;
3887 /* Called with irq disabled */
3888 static inline void ____napi_schedule(struct softnet_data
*sd
,
3889 struct napi_struct
*napi
)
3891 list_add_tail(&napi
->poll_list
, &sd
->poll_list
);
3892 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
3897 /* One global table that all flow-based protocols share. */
3898 struct rps_sock_flow_table __rcu
*rps_sock_flow_table __read_mostly
;
3899 EXPORT_SYMBOL(rps_sock_flow_table
);
3900 u32 rps_cpu_mask __read_mostly
;
3901 EXPORT_SYMBOL(rps_cpu_mask
);
3903 struct static_key_false rps_needed __read_mostly
;
3904 EXPORT_SYMBOL(rps_needed
);
3905 struct static_key_false rfs_needed __read_mostly
;
3906 EXPORT_SYMBOL(rfs_needed
);
3908 static struct rps_dev_flow
*
3909 set_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
3910 struct rps_dev_flow
*rflow
, u16 next_cpu
)
3912 if (next_cpu
< nr_cpu_ids
) {
3913 #ifdef CONFIG_RFS_ACCEL
3914 struct netdev_rx_queue
*rxqueue
;
3915 struct rps_dev_flow_table
*flow_table
;
3916 struct rps_dev_flow
*old_rflow
;
3921 /* Should we steer this flow to a different hardware queue? */
3922 if (!skb_rx_queue_recorded(skb
) || !dev
->rx_cpu_rmap
||
3923 !(dev
->features
& NETIF_F_NTUPLE
))
3925 rxq_index
= cpu_rmap_lookup_index(dev
->rx_cpu_rmap
, next_cpu
);
3926 if (rxq_index
== skb_get_rx_queue(skb
))
3929 rxqueue
= dev
->_rx
+ rxq_index
;
3930 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
3933 flow_id
= skb_get_hash(skb
) & flow_table
->mask
;
3934 rc
= dev
->netdev_ops
->ndo_rx_flow_steer(dev
, skb
,
3935 rxq_index
, flow_id
);
3939 rflow
= &flow_table
->flows
[flow_id
];
3941 if (old_rflow
->filter
== rflow
->filter
)
3942 old_rflow
->filter
= RPS_NO_FILTER
;
3946 per_cpu(softnet_data
, next_cpu
).input_queue_head
;
3949 rflow
->cpu
= next_cpu
;
3954 * get_rps_cpu is called from netif_receive_skb and returns the target
3955 * CPU from the RPS map of the receiving queue for a given skb.
3956 * rcu_read_lock must be held on entry.
3958 static int get_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
3959 struct rps_dev_flow
**rflowp
)
3961 const struct rps_sock_flow_table
*sock_flow_table
;
3962 struct netdev_rx_queue
*rxqueue
= dev
->_rx
;
3963 struct rps_dev_flow_table
*flow_table
;
3964 struct rps_map
*map
;
3969 if (skb_rx_queue_recorded(skb
)) {
3970 u16 index
= skb_get_rx_queue(skb
);
3972 if (unlikely(index
>= dev
->real_num_rx_queues
)) {
3973 WARN_ONCE(dev
->real_num_rx_queues
> 1,
3974 "%s received packet on queue %u, but number "
3975 "of RX queues is %u\n",
3976 dev
->name
, index
, dev
->real_num_rx_queues
);
3982 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3984 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
3985 map
= rcu_dereference(rxqueue
->rps_map
);
3986 if (!flow_table
&& !map
)
3989 skb_reset_network_header(skb
);
3990 hash
= skb_get_hash(skb
);
3994 sock_flow_table
= rcu_dereference(rps_sock_flow_table
);
3995 if (flow_table
&& sock_flow_table
) {
3996 struct rps_dev_flow
*rflow
;
4000 /* First check into global flow table if there is a match */
4001 ident
= sock_flow_table
->ents
[hash
& sock_flow_table
->mask
];
4002 if ((ident
^ hash
) & ~rps_cpu_mask
)
4005 next_cpu
= ident
& rps_cpu_mask
;
4007 /* OK, now we know there is a match,
4008 * we can look at the local (per receive queue) flow table
4010 rflow
= &flow_table
->flows
[hash
& flow_table
->mask
];
4014 * If the desired CPU (where last recvmsg was done) is
4015 * different from current CPU (one in the rx-queue flow
4016 * table entry), switch if one of the following holds:
4017 * - Current CPU is unset (>= nr_cpu_ids).
4018 * - Current CPU is offline.
4019 * - The current CPU's queue tail has advanced beyond the
4020 * last packet that was enqueued using this table entry.
4021 * This guarantees that all previous packets for the flow
4022 * have been dequeued, thus preserving in order delivery.
4024 if (unlikely(tcpu
!= next_cpu
) &&
4025 (tcpu
>= nr_cpu_ids
|| !cpu_online(tcpu
) ||
4026 ((int)(per_cpu(softnet_data
, tcpu
).input_queue_head
-
4027 rflow
->last_qtail
)) >= 0)) {
4029 rflow
= set_rps_cpu(dev
, skb
, rflow
, next_cpu
);
4032 if (tcpu
< nr_cpu_ids
&& cpu_online(tcpu
)) {
4042 tcpu
= map
->cpus
[reciprocal_scale(hash
, map
->len
)];
4043 if (cpu_online(tcpu
)) {
4053 #ifdef CONFIG_RFS_ACCEL
4056 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4057 * @dev: Device on which the filter was set
4058 * @rxq_index: RX queue index
4059 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4060 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4062 * Drivers that implement ndo_rx_flow_steer() should periodically call
4063 * this function for each installed filter and remove the filters for
4064 * which it returns %true.
4066 bool rps_may_expire_flow(struct net_device
*dev
, u16 rxq_index
,
4067 u32 flow_id
, u16 filter_id
)
4069 struct netdev_rx_queue
*rxqueue
= dev
->_rx
+ rxq_index
;
4070 struct rps_dev_flow_table
*flow_table
;
4071 struct rps_dev_flow
*rflow
;
4076 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
4077 if (flow_table
&& flow_id
<= flow_table
->mask
) {
4078 rflow
= &flow_table
->flows
[flow_id
];
4079 cpu
= READ_ONCE(rflow
->cpu
);
4080 if (rflow
->filter
== filter_id
&& cpu
< nr_cpu_ids
&&
4081 ((int)(per_cpu(softnet_data
, cpu
).input_queue_head
-
4082 rflow
->last_qtail
) <
4083 (int)(10 * flow_table
->mask
)))
4089 EXPORT_SYMBOL(rps_may_expire_flow
);
4091 #endif /* CONFIG_RFS_ACCEL */
4093 /* Called from hardirq (IPI) context */
4094 static void rps_trigger_softirq(void *data
)
4096 struct softnet_data
*sd
= data
;
4098 ____napi_schedule(sd
, &sd
->backlog
);
4102 #endif /* CONFIG_RPS */
4105 * Check if this softnet_data structure is another cpu one
4106 * If yes, queue it to our IPI list and return 1
4109 static int rps_ipi_queued(struct softnet_data
*sd
)
4112 struct softnet_data
*mysd
= this_cpu_ptr(&softnet_data
);
4115 sd
->rps_ipi_next
= mysd
->rps_ipi_list
;
4116 mysd
->rps_ipi_list
= sd
;
4118 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
4121 #endif /* CONFIG_RPS */
4125 #ifdef CONFIG_NET_FLOW_LIMIT
4126 int netdev_flow_limit_table_len __read_mostly
= (1 << 12);
4129 static bool skb_flow_limit(struct sk_buff
*skb
, unsigned int qlen
)
4131 #ifdef CONFIG_NET_FLOW_LIMIT
4132 struct sd_flow_limit
*fl
;
4133 struct softnet_data
*sd
;
4134 unsigned int old_flow
, new_flow
;
4136 if (qlen
< (netdev_max_backlog
>> 1))
4139 sd
= this_cpu_ptr(&softnet_data
);
4142 fl
= rcu_dereference(sd
->flow_limit
);
4144 new_flow
= skb_get_hash(skb
) & (fl
->num_buckets
- 1);
4145 old_flow
= fl
->history
[fl
->history_head
];
4146 fl
->history
[fl
->history_head
] = new_flow
;
4149 fl
->history_head
&= FLOW_LIMIT_HISTORY
- 1;
4151 if (likely(fl
->buckets
[old_flow
]))
4152 fl
->buckets
[old_flow
]--;
4154 if (++fl
->buckets
[new_flow
] > (FLOW_LIMIT_HISTORY
>> 1)) {
4166 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4167 * queue (may be a remote CPU queue).
4169 static int enqueue_to_backlog(struct sk_buff
*skb
, int cpu
,
4170 unsigned int *qtail
)
4172 struct softnet_data
*sd
;
4173 unsigned long flags
;
4176 sd
= &per_cpu(softnet_data
, cpu
);
4178 local_irq_save(flags
);
4181 if (!netif_running(skb
->dev
))
4183 qlen
= skb_queue_len(&sd
->input_pkt_queue
);
4184 if (qlen
<= netdev_max_backlog
&& !skb_flow_limit(skb
, qlen
)) {
4187 __skb_queue_tail(&sd
->input_pkt_queue
, skb
);
4188 input_queue_tail_incr_save(sd
, qtail
);
4190 local_irq_restore(flags
);
4191 return NET_RX_SUCCESS
;
4194 /* Schedule NAPI for backlog device
4195 * We can use non atomic operation since we own the queue lock
4197 if (!__test_and_set_bit(NAPI_STATE_SCHED
, &sd
->backlog
.state
)) {
4198 if (!rps_ipi_queued(sd
))
4199 ____napi_schedule(sd
, &sd
->backlog
);
4208 local_irq_restore(flags
);
4210 atomic_long_inc(&skb
->dev
->rx_dropped
);
4215 static struct netdev_rx_queue
*netif_get_rxqueue(struct sk_buff
*skb
)
4217 struct net_device
*dev
= skb
->dev
;
4218 struct netdev_rx_queue
*rxqueue
;
4222 if (skb_rx_queue_recorded(skb
)) {
4223 u16 index
= skb_get_rx_queue(skb
);
4225 if (unlikely(index
>= dev
->real_num_rx_queues
)) {
4226 WARN_ONCE(dev
->real_num_rx_queues
> 1,
4227 "%s received packet on queue %u, but number "
4228 "of RX queues is %u\n",
4229 dev
->name
, index
, dev
->real_num_rx_queues
);
4231 return rxqueue
; /* Return first rxqueue */
4238 static u32
netif_receive_generic_xdp(struct sk_buff
*skb
,
4239 struct xdp_buff
*xdp
,
4240 struct bpf_prog
*xdp_prog
)
4242 struct netdev_rx_queue
*rxqueue
;
4243 void *orig_data
, *orig_data_end
;
4244 u32 metalen
, act
= XDP_DROP
;
4245 __be16 orig_eth_type
;
4251 /* Reinjected packets coming from act_mirred or similar should
4252 * not get XDP generic processing.
4254 if (skb_is_redirected(skb
))
4257 /* XDP packets must be linear and must have sufficient headroom
4258 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4259 * native XDP provides, thus we need to do it here as well.
4261 if (skb_cloned(skb
) || skb_is_nonlinear(skb
) ||
4262 skb_headroom(skb
) < XDP_PACKET_HEADROOM
) {
4263 int hroom
= XDP_PACKET_HEADROOM
- skb_headroom(skb
);
4264 int troom
= skb
->tail
+ skb
->data_len
- skb
->end
;
4266 /* In case we have to go down the path and also linearize,
4267 * then lets do the pskb_expand_head() work just once here.
4269 if (pskb_expand_head(skb
,
4270 hroom
> 0 ? ALIGN(hroom
, NET_SKB_PAD
) : 0,
4271 troom
> 0 ? troom
+ 128 : 0, GFP_ATOMIC
))
4273 if (skb_linearize(skb
))
4277 /* The XDP program wants to see the packet starting at the MAC
4280 mac_len
= skb
->data
- skb_mac_header(skb
);
4281 hlen
= skb_headlen(skb
) + mac_len
;
4282 xdp
->data
= skb
->data
- mac_len
;
4283 xdp
->data_meta
= xdp
->data
;
4284 xdp
->data_end
= xdp
->data
+ hlen
;
4285 xdp
->data_hard_start
= skb
->data
- skb_headroom(skb
);
4286 orig_data_end
= xdp
->data_end
;
4287 orig_data
= xdp
->data
;
4288 eth
= (struct ethhdr
*)xdp
->data
;
4289 orig_bcast
= is_multicast_ether_addr_64bits(eth
->h_dest
);
4290 orig_eth_type
= eth
->h_proto
;
4292 rxqueue
= netif_get_rxqueue(skb
);
4293 xdp
->rxq
= &rxqueue
->xdp_rxq
;
4295 act
= bpf_prog_run_xdp(xdp_prog
, xdp
);
4297 /* check if bpf_xdp_adjust_head was used */
4298 off
= xdp
->data
- orig_data
;
4301 __skb_pull(skb
, off
);
4303 __skb_push(skb
, -off
);
4305 skb
->mac_header
+= off
;
4306 skb_reset_network_header(skb
);
4309 /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
4312 off
= orig_data_end
- xdp
->data_end
;
4314 skb_set_tail_pointer(skb
, xdp
->data_end
- xdp
->data
);
4319 /* check if XDP changed eth hdr such SKB needs update */
4320 eth
= (struct ethhdr
*)xdp
->data
;
4321 if ((orig_eth_type
!= eth
->h_proto
) ||
4322 (orig_bcast
!= is_multicast_ether_addr_64bits(eth
->h_dest
))) {
4323 __skb_push(skb
, ETH_HLEN
);
4324 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
4330 __skb_push(skb
, mac_len
);
4333 metalen
= xdp
->data
- xdp
->data_meta
;
4335 skb_metadata_set(skb
, metalen
);
4338 bpf_warn_invalid_xdp_action(act
);
4341 trace_xdp_exception(skb
->dev
, xdp_prog
, act
);
4352 /* When doing generic XDP we have to bypass the qdisc layer and the
4353 * network taps in order to match in-driver-XDP behavior.
4355 void generic_xdp_tx(struct sk_buff
*skb
, struct bpf_prog
*xdp_prog
)
4357 struct net_device
*dev
= skb
->dev
;
4358 struct netdev_queue
*txq
;
4359 bool free_skb
= true;
4362 txq
= netdev_core_pick_tx(dev
, skb
, NULL
);
4363 cpu
= smp_processor_id();
4364 HARD_TX_LOCK(dev
, txq
, cpu
);
4365 if (!netif_xmit_stopped(txq
)) {
4366 rc
= netdev_start_xmit(skb
, dev
, txq
, 0);
4367 if (dev_xmit_complete(rc
))
4370 HARD_TX_UNLOCK(dev
, txq
);
4372 trace_xdp_exception(dev
, xdp_prog
, XDP_TX
);
4376 EXPORT_SYMBOL_GPL(generic_xdp_tx
);
4378 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key
);
4380 int do_xdp_generic(struct bpf_prog
*xdp_prog
, struct sk_buff
*skb
)
4383 struct xdp_buff xdp
;
4387 act
= netif_receive_generic_xdp(skb
, &xdp
, xdp_prog
);
4388 if (act
!= XDP_PASS
) {
4391 err
= xdp_do_generic_redirect(skb
->dev
, skb
,
4397 generic_xdp_tx(skb
, xdp_prog
);
4408 EXPORT_SYMBOL_GPL(do_xdp_generic
);
4410 static int netif_rx_internal(struct sk_buff
*skb
)
4414 net_timestamp_check(netdev_tstamp_prequeue
, skb
);
4416 trace_netif_rx(skb
);
4419 if (static_branch_unlikely(&rps_needed
)) {
4420 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
4426 cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
4428 cpu
= smp_processor_id();
4430 ret
= enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
4439 ret
= enqueue_to_backlog(skb
, get_cpu(), &qtail
);
4446 * netif_rx - post buffer to the network code
4447 * @skb: buffer to post
4449 * This function receives a packet from a device driver and queues it for
4450 * the upper (protocol) levels to process. It always succeeds. The buffer
4451 * may be dropped during processing for congestion control or by the
4455 * NET_RX_SUCCESS (no congestion)
4456 * NET_RX_DROP (packet was dropped)
4460 int netif_rx(struct sk_buff
*skb
)
4464 trace_netif_rx_entry(skb
);
4466 ret
= netif_rx_internal(skb
);
4467 trace_netif_rx_exit(ret
);
4471 EXPORT_SYMBOL(netif_rx
);
4473 int netif_rx_ni(struct sk_buff
*skb
)
4477 trace_netif_rx_ni_entry(skb
);
4480 err
= netif_rx_internal(skb
);
4481 if (local_softirq_pending())
4484 trace_netif_rx_ni_exit(err
);
4488 EXPORT_SYMBOL(netif_rx_ni
);
4490 static __latent_entropy
void net_tx_action(struct softirq_action
*h
)
4492 struct softnet_data
*sd
= this_cpu_ptr(&softnet_data
);
4494 if (sd
->completion_queue
) {
4495 struct sk_buff
*clist
;
4497 local_irq_disable();
4498 clist
= sd
->completion_queue
;
4499 sd
->completion_queue
= NULL
;
4503 struct sk_buff
*skb
= clist
;
4505 clist
= clist
->next
;
4507 WARN_ON(refcount_read(&skb
->users
));
4508 if (likely(get_kfree_skb_cb(skb
)->reason
== SKB_REASON_CONSUMED
))
4509 trace_consume_skb(skb
);
4511 trace_kfree_skb(skb
, net_tx_action
);
4513 if (skb
->fclone
!= SKB_FCLONE_UNAVAILABLE
)
4516 __kfree_skb_defer(skb
);
4519 __kfree_skb_flush();
4522 if (sd
->output_queue
) {
4525 local_irq_disable();
4526 head
= sd
->output_queue
;
4527 sd
->output_queue
= NULL
;
4528 sd
->output_queue_tailp
= &sd
->output_queue
;
4534 struct Qdisc
*q
= head
;
4535 spinlock_t
*root_lock
= NULL
;
4537 head
= head
->next_sched
;
4539 /* We need to make sure head->next_sched is read
4540 * before clearing __QDISC_STATE_SCHED
4542 smp_mb__before_atomic();
4544 if (!(q
->flags
& TCQ_F_NOLOCK
)) {
4545 root_lock
= qdisc_lock(q
);
4546 spin_lock(root_lock
);
4547 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
,
4549 /* There is a synchronize_net() between
4550 * STATE_DEACTIVATED flag being set and
4551 * qdisc_reset()/some_qdisc_is_busy() in
4552 * dev_deactivate(), so we can safely bail out
4553 * early here to avoid data race between
4554 * qdisc_deactivate() and some_qdisc_is_busy()
4555 * for lockless qdisc.
4557 clear_bit(__QDISC_STATE_SCHED
, &q
->state
);
4561 clear_bit(__QDISC_STATE_SCHED
, &q
->state
);
4564 spin_unlock(root_lock
);
4570 xfrm_dev_backlog(sd
);
4573 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4574 /* This hook is defined here for ATM LANE */
4575 int (*br_fdb_test_addr_hook
)(struct net_device
*dev
,
4576 unsigned char *addr
) __read_mostly
;
4577 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook
);
4580 static inline struct sk_buff
*
4581 sch_handle_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
, int *ret
,
4582 struct net_device
*orig_dev
)
4584 #ifdef CONFIG_NET_CLS_ACT
4585 struct mini_Qdisc
*miniq
= rcu_dereference_bh(skb
->dev
->miniq_ingress
);
4586 struct tcf_result cl_res
;
4588 /* If there's at least one ingress present somewhere (so
4589 * we get here via enabled static key), remaining devices
4590 * that are not configured with an ingress qdisc will bail
4597 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
4601 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
4602 skb
->tc_at_ingress
= 1;
4603 mini_qdisc_bstats_cpu_update(miniq
, skb
);
4605 switch (tcf_classify(skb
, miniq
->filter_list
, &cl_res
, false)) {
4607 case TC_ACT_RECLASSIFY
:
4608 skb
->tc_index
= TC_H_MIN(cl_res
.classid
);
4611 mini_qdisc_qstats_cpu_drop(miniq
);
4619 case TC_ACT_REDIRECT
:
4620 /* skb_mac_header check was done by cls/act_bpf, so
4621 * we can safely push the L2 header back before
4622 * redirecting to another netdev
4624 __skb_push(skb
, skb
->mac_len
);
4625 skb_do_redirect(skb
);
4627 case TC_ACT_CONSUMED
:
4632 #endif /* CONFIG_NET_CLS_ACT */
4637 * netdev_is_rx_handler_busy - check if receive handler is registered
4638 * @dev: device to check
4640 * Check if a receive handler is already registered for a given device.
4641 * Return true if there one.
4643 * The caller must hold the rtnl_mutex.
4645 bool netdev_is_rx_handler_busy(struct net_device
*dev
)
4648 return dev
&& rtnl_dereference(dev
->rx_handler
);
4650 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy
);
4653 * netdev_rx_handler_register - register receive handler
4654 * @dev: device to register a handler for
4655 * @rx_handler: receive handler to register
4656 * @rx_handler_data: data pointer that is used by rx handler
4658 * Register a receive handler for a device. This handler will then be
4659 * called from __netif_receive_skb. A negative errno code is returned
4662 * The caller must hold the rtnl_mutex.
4664 * For a general description of rx_handler, see enum rx_handler_result.
4666 int netdev_rx_handler_register(struct net_device
*dev
,
4667 rx_handler_func_t
*rx_handler
,
4668 void *rx_handler_data
)
4670 if (netdev_is_rx_handler_busy(dev
))
4673 if (dev
->priv_flags
& IFF_NO_RX_HANDLER
)
4676 /* Note: rx_handler_data must be set before rx_handler */
4677 rcu_assign_pointer(dev
->rx_handler_data
, rx_handler_data
);
4678 rcu_assign_pointer(dev
->rx_handler
, rx_handler
);
4682 EXPORT_SYMBOL_GPL(netdev_rx_handler_register
);
4685 * netdev_rx_handler_unregister - unregister receive handler
4686 * @dev: device to unregister a handler from
4688 * Unregister a receive handler from a device.
4690 * The caller must hold the rtnl_mutex.
4692 void netdev_rx_handler_unregister(struct net_device
*dev
)
4696 RCU_INIT_POINTER(dev
->rx_handler
, NULL
);
4697 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
4698 * section has a guarantee to see a non NULL rx_handler_data
4702 RCU_INIT_POINTER(dev
->rx_handler_data
, NULL
);
4704 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister
);
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}
static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
				    struct packet_type **ppt_prev)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct sk_buff *skb = *pskb;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (static_branch_unlikely(&generic_xdp_needed_key)) {
		int ret2;

		preempt_disable();
		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		preempt_enable();

		if (ret2 != XDP_PASS) {
			ret = NET_RX_DROP;
			goto out;
		}
		skb_reset_mac_len(skb);
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_branch_unlikely(&ingress_needed_key)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
	skb_reset_redirect(skb);
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fall through */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
check_vlan_id:
		if (skb_vlan_tag_get_id(skb)) {
			/* Vlan id is non 0 and vlan_do_receive() above couldn't
			 * find vlan device.
			 */
			skb->pkt_type = PACKET_OTHERHOST;
		} else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
			   skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
			/* Outer header is 802.1P with vlan 0, inner header is
			 * 802.1Q or 802.1AD and vlan_do_receive() above could
			 * not find vlan dev for vlan id 0.
			 */
			__vlan_hwaccel_clear_tag(skb);
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				goto out;
			if (vlan_do_receive(&skb))
				/* After stripping off 802.1P header with vlan 0
				 * vlan dev is found for inner header.
				 */
				goto another_round;
			else if (unlikely(!skb))
				goto out;
			else
				/* We have stripped outer 802.1P vlan 0 header.
				 * But could not find vlan dev.
				 * check again for vlan id to set OTHERHOST.
				 */
				goto check_vlan_id;
		}
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		__vlan_hwaccel_clear_tag(skb);
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
			goto drop;
		*ppt_prev = pt_prev;
	} else {
drop:
		if (!deliver_exact)
			atomic_long_inc(&skb->dev->rx_dropped);
		else
			atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	/* The invariant here is that if *ppt_prev is not NULL
	 * then skb should also be non-NULL.
	 *
	 * Apparently *ppt_prev assignment above holds this invariant due to
	 * skb dereferencing near it.
	 */
	*pskb = skb;
	return ret;
}
static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct net_device *orig_dev = skb->dev;
	struct packet_type *pt_prev = NULL;
	int ret;

	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
	if (pt_prev)
		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
					 skb->dev, pt_prev, orig_dev);
	return ret;
}
/**
 *	netif_receive_skb_core - special purpose version of netif_receive_skb
 *	@skb: buffer to process
 *
 *	More direct receive version of netif_receive_skb(). It should
 *	only be used by callers that have a need to skip RPS and Generic XDP.
 *	Caller must also take care of handling if (page_is_)pfmemalloc.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb_core(struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __netif_receive_skb_one_core(skb, false);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb_core);
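
/* Editor's sketch (not in the original): the sort of caller
 * netif_receive_skb_core() is documented for - code already running in
 * softirq context that deliberately bypasses RPS and generic XDP. The
 * function name is hypothetical.
 */
static int __maybe_unused example_direct_rx(struct net_device *dev,
					    struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	/* No RPS steering, no generic XDP: straight into the core receive path */
	return netif_receive_skb_core(skb);
}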
static inline void __netif_receive_skb_list_ptype(struct list_head *head,
						  struct packet_type *pt_prev,
						  struct net_device *orig_dev)
{
	struct sk_buff *skb, *next;

	if (!pt_prev)
		return;
	if (list_empty(head))
		return;
	if (pt_prev->list_func != NULL)
		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
				   ip_list_rcv, head, pt_prev, orig_dev);
	else
		list_for_each_entry_safe(skb, next, head, list) {
			skb_list_del_init(skb);
			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
		}
}
static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{
	/* Fast-path assumptions:
	 * - There is no RX handler.
	 * - Only one packet_type matches.
	 * If either of these fails, we will end up doing some per-packet
	 * processing in-line, then handling the 'last ptype' for the whole
	 * sublist.  This can't cause out-of-order delivery to any single ptype,
	 * because the 'last ptype' must be constant across the sublist, and all
	 * other ptypes are handled per-packet.
	 */
	/* Current (common) ptype of sublist */
	struct packet_type *pt_curr = NULL;
	/* Current (common) orig_dev of sublist */
	struct net_device *od_curr = NULL;
	struct list_head sublist;
	struct sk_buff *skb, *next;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *orig_dev = skb->dev;
		struct packet_type *pt_prev = NULL;

		skb_list_del_init(skb);
		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
		if (!pt_prev)
			continue;
		if (pt_curr != pt_prev || od_curr != orig_dev) {
			/* dispatch old sublist */
			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			pt_curr = pt_prev;
			od_curr = orig_dev;
		}
		list_add_tail(&skb->list, &sublist);
	}

	/* dispatch final sublist */
	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_one_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_one_core(skb, false);

	return ret;
}
static void __netif_receive_skb_list(struct list_head *head)
{
	unsigned long noreclaim_flag = 0;
	struct sk_buff *skb, *next;
	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */

	list_for_each_entry_safe(skb, next, head, list) {
		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
			struct list_head sublist;

			/* Handle the previous sublist */
			list_cut_before(&sublist, head, &skb->list);
			if (!list_empty(&sublist))
				__netif_receive_skb_list_core(&sublist, pfmemalloc);
			pfmemalloc = !pfmemalloc;
			/* See comments in __netif_receive_skb */
			if (pfmemalloc)
				noreclaim_flag = memalloc_noreclaim_save();
			else
				memalloc_noreclaim_restore(noreclaim_flag);
		}
	}
	/* Handle the remaining sublist */
	if (!list_empty(head))
		__netif_receive_skb_list_core(head, pfmemalloc);
	/* Restore pflags */
	if (pfmemalloc)
		memalloc_noreclaim_restore(noreclaim_flag);
}
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_branch_dec(&generic_xdp_needed_key);
		} else if (new && !old) {
			static_branch_inc(&generic_xdp_needed_key);
			dev_disable_lro(dev);
			dev_disable_gro_hw(dev);
		}
		break;

	case XDP_QUERY_PROG:
		xdp->prog_id = old ? old->aux->id : 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}
static void netif_receive_skb_list_internal(struct list_head *head)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		net_timestamp_check(netdev_tstamp_prequeue, skb);
		skb_list_del_init(skb);
		if (!skb_defer_rx_timestamp(skb))
			list_add_tail(&skb->list, &sublist);
	}
	list_splice_init(&sublist, head);

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		list_for_each_entry_safe(skb, next, head, list) {
			struct rps_dev_flow voidflow, *rflow = &voidflow;
			int cpu = get_rps_cpu(skb->dev, skb, &rflow);

			if (cpu >= 0) {
				/* Will be handled, remove from list */
				skb_list_del_init(skb);
				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			}
		}
	}
#endif
	__netif_receive_skb_list(head);
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	trace_netif_receive_skb_entry(skb);

	ret = netif_receive_skb_internal(skb);
	trace_netif_receive_skb_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
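
/* Editor's sketch (not part of the original file): the classic non-NAPI style
 * driver RX path ending in netif_receive_skb(). A real driver would build the
 * skb from its RX ring; example_netdev_rx() and its arguments are
 * hypothetical.
 */
static int __maybe_unused example_netdev_rx(struct net_device *dev,
					    const void *frame, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return NET_RX_DROP;
	skb_put_data(skb, frame, len);
	skb->protocol = eth_type_trans(skb, dev);
	/* May be dropped by congestion control or the protocol layers */
	return netif_receive_skb(skb);
}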
/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return;
	if (trace_netif_receive_skb_list_entry_enabled()) {
		list_for_each_entry(skb, head, list)
			trace_netif_receive_skb_list_entry(skb);
	}
	netif_receive_skb_list_internal(head);
	trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
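
/* Editor's sketch (not in the original): batching several received skbs and
 * handing them to netif_receive_skb_list() in one call, which is how
 * list-aware drivers amortize per-packet costs. Names are hypothetical.
 */
static void __maybe_unused example_rx_batch(struct sk_buff **skbs, int n)
{
	LIST_HEAD(head);
	int i;

	for (i = 0; i < n; i++)
		list_add_tail(&skbs[i]->list, &head);
	netif_receive_skb_list(&head);
}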
DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();
	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			dev_kfree_skb_irq(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);
	local_irq_enable();

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	local_bh_enable();
}
static void flush_all_backlogs(void)
{
	unsigned int cpu;

	get_online_cpus();

	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */
static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
	return NET_RX_SUCCESS;
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
static struct list_head *gro_list_prepare(struct napi_struct *napi,
					  struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct list_head *head;
	struct sk_buff *p;

	head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}

	return head;
}
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *gro_head;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_head = gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
					ipv6_gro_receive, inet_gro_receive,
					gro_head, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		napi->gro_hash[hash].count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
		gro_flush_oldest(napi, gro_head);
	} else {
		napi->gro_hash[hash].count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, gro_head);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (napi->gro_hash[hash].count) {
		if (!test_bit(hash, &napi->gro_bitmask))
			__set_bit(hash, &napi->gro_bitmask);
	} else if (test_bit(hash, &napi->gro_bitmask)) {
		__clear_bit(hash, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	skb_ext_put(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
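
/* Editor's sketch (not part of the original file): the shape of a typical
 * NAPI poll callback feeding packets through napi_gro_receive(). The ring
 * accessor example_ring_next_skb() is a hypothetical stand-in for driver
 * specific RX ring handling.
 */
static struct sk_buff *__maybe_unused example_ring_next_skb(struct napi_struct *napi)
{
	return NULL; /* a real driver would pull the next completed skb here */
}

static int __maybe_unused example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = example_ring_next_skb(napi);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Under budget: stop polling and let the device interrupt again */
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}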
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	skb_ext_reset(skb);

	napi->skb = skb;
}
struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_DROP:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}
/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
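
/* Editor's sketch (not in the original): how a page-based driver pairs
 * napi_get_frags() with napi_gro_frags(). The skb is pre-allocated by the
 * core; the driver only attaches its RX page fragment. The page/offset/size
 * parameters are hypothetical.
 */
static gro_result_t __maybe_unused example_rx_frag(struct napi_struct *napi,
						   struct page *page,
						   unsigned int off,
						   unsigned int size,
						   unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return GRO_DROP;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, size,
			truesize);
	/* napi_frags_skb() will pull the Ethernet header out of frag0 */
	return napi_gro_frags(napi);
}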
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending ipi, its better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = dev_rx_weight;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
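
/* Editor's note (not in the original): the division/multiplication in
 * napi_schedule_prep() above is a branchless spelling of a conditional OR.
 * NAPIF_STATE_SCHED is a single bit, so (val & NAPIF_STATE_SCHED) is either
 * 0 or NAPIF_STATE_SCHED; dividing by NAPIF_STATE_SCHED yields 0 or 1, and
 * multiplying by NAPIF_STATE_MISSED yields 0 or NAPIF_STATE_MISSED. A
 * hypothetical helper spelling out the equivalence:
 */
static inline unsigned long example_sched_to_missed(unsigned long val)
{
	unsigned long branchy = (val & NAPIF_STATE_SCHED) ?
				NAPIF_STATE_MISSED : 0;
	unsigned long branchless = (val & NAPIF_STATE_SCHED) /
				   NAPIF_STATE_SCHED * NAPIF_STATE_MISSED;

	WARN_ON_ONCE(branchy != branchless);
	return branchless;
}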
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 *
 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
 * because the interrupt disabled assumption might not be true
 * due to force-threaded interrupts and spinlock substitution.
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
	else
		__napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (n->gro_bitmask) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to bound somehow the time packets are kept in
		 * the GRO layer
		 */
		napi_gro_flush(n, !!timeout);
		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
	}

	gro_normal_list(n);

	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we dont take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	/* We can't gro_normal_list() here, because napi->poll() might have
	 * rearmed the napi (napi_complete_done()) in which case it could
	 * already be running on another CPU.
	 */
	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET) {
		/* As the whole budget was spent, we still own the napi so can
		 * safely handle the rx_list.
		 */
		gro_normal_list(napi);
		__napi_schedule(napi);
	}
	local_bh_enable();
}
void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
		gro_normal_list(napi);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);
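
/* Editor's sketch (not part of the original): a minimal loop_end callback of
 * the form napi_busy_loop() expects. The real network stack passes
 * sk_busy_loop_end() from the socket layer; this hypothetical variant simply
 * reuses the sysctl-controlled busy poll budget.
 *
 * Hypothetical usage: napi_busy_loop(napi_id, example_loop_end, NULL);
 */
static bool __maybe_unused example_loop_end(void *loop_end_arg,
					    unsigned long start_time)
{
	/* Stop once the configured busy poll time has elapsed */
	return busy_loop_timeout(start_time);
}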
#endif /* CONFIG_NET_RX_BUSY_POLL */
static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_bitmask && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}
static void init_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		INIT_LIST_HEAD(&napi->gro_hash[i].list);
		napi->gro_hash[i].count = 0;
	}
	napi->gro_bitmask = 0;
}
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	init_gro_hash(napi);
	napi->skb = NULL;
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
				weight);
	napi->weight = weight;
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	set_bit(NAPI_STATE_NPSVC, &napi->state);
	list_add_rcu(&napi->dev_list, &dev->napi_list);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
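
/* Editor's sketch (not in the original): typical driver-side NAPI setup and
 * teardown around netif_napi_add(). struct example_priv and both helpers are
 * hypothetical; napi_enable()/napi_disable() are the standard counterparts
 * drivers call from their open/stop paths.
 */
struct example_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

static int __maybe_unused example_setup_napi(struct net_device *dev,
					     struct example_priv *priv,
					     int (*poll)(struct napi_struct *, int))
{
	priv->dev = dev;
	/* NAPI starts in the SCHED (owned) state; napi_enable() clears it */
	netif_napi_add(dev, &priv->napi, poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
	return 0;
}

static void __maybe_unused example_teardown_napi(struct example_priv *priv)
{
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
}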
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);
static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}
/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	flush_gro_hash(napi);
	napi->gro_bitmask = 0;
}
EXPORT_SYMBOL(netif_napi_del);
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	if (unlikely(work > weight))
		pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
			    n->poll, work, weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	gro_normal_list(n);

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* lookup ignore flag */
	bool ignore;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};
static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master) && !upper->ignore)
		return upper->dev;
	return NULL;
}
/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);
/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{
	struct netdev_adjacent *upper;

	upper = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;
	*ignore = upper->ignore;

	return upper->dev;
}
static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
static int __netdev_walk_all_upper_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
						 void *data),
				       void *data)
{
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;
	bool ignore;

	now = dev;
	iter = &dev->adj_list.upper;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			udev = __netdev_next_upper_dev(now, &iter, &ignore);
			if (!udev)
				break;
			if (ignore)
				continue;

			next = udev;
			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.upper;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			udev = netdev_next_upper_dev_rcu(now, &iter);
			if (!udev)
				break;

			next = udev;
			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
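
/* Editor's sketch (not part of the original): a walk callback in the shape
 * netdev_walk_all_upper_dev_rcu() expects. A callback stops the walk by
 * returning non-zero; this hypothetical one counts uppers via a counter
 * passed through the data pointer.
 *
 * Hypothetical usage, under rcu_read_lock():
 *	unsigned int n = 0;
 *	netdev_walk_all_upper_dev_rcu(dev, example_count_upper, &n);
 */
static int __maybe_unused example_count_upper(struct net_device *upper_dev,
					      void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0; /* keep walking the whole upper graph */
}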
static bool __netdev_has_upper_dev(struct net_device *dev,
				   struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
					   upper_dev);
}
/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *                         list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);
static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;
	*ignore = lower->ignore;

	return lower->dev;
}
int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = netdev_next_lower_dev(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static int __netdev_walk_all_lower_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
						 void *data),
				       void *data)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;
	bool ignore;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
			if (!ldev)
				break;
			if (ignore)
				continue;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
					     struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
static u8 __netdev_upper_depth(struct net_device *dev)
{
	struct net_device *udev;
	struct list_head *iter;
	u8 max_depth = 0;
	bool ignore;

	for (iter = &dev->adj_list.upper,
	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
	     udev;
	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
		if (ignore)
			continue;
		if (max_depth < udev->upper_level)
			max_depth = udev->upper_level;
	}

	return max_depth;
}

static u8 __netdev_lower_depth(struct net_device *dev)
{
	struct net_device *ldev;
	struct list_head *iter;
	u8 max_depth = 0;
	bool ignore;

	for (iter = &dev->adj_list.lower,
	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
	     ldev;
	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
		if (ignore)
			continue;
		if (max_depth < ldev->lower_level)
			max_depth = ldev->lower_level;
	}

	return max_depth;
}

static int __netdev_update_upper_level(struct net_device *dev, void *data)
{
	dev->upper_level = __netdev_upper_depth(dev) + 1;
	return 0;
}

static int __netdev_update_lower_level(struct net_device *dev, void *data)
{
	dev->lower_level = __netdev_lower_depth(dev) + 1;
	return 0;
}
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, data);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = netdev_next_lower_dev_rcu(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU
 *					variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}
static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	adj->ignore = false;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}
static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}
7309 static int __netdev_upper_dev_link(struct net_device
*dev
,
7310 struct net_device
*upper_dev
, bool master
,
7311 void *upper_priv
, void *upper_info
,
7312 struct netlink_ext_ack
*extack
)
7314 struct netdev_notifier_changeupper_info changeupper_info
= {
7319 .upper_dev
= upper_dev
,
7322 .upper_info
= upper_info
,
7324 struct net_device
*master_dev
;
7329 if (dev
== upper_dev
)
7332 /* To prevent loops, check if dev is not upper device to upper_dev. */
7333 if (__netdev_has_upper_dev(upper_dev
, dev
))
7336 if ((dev
->lower_level
+ upper_dev
->upper_level
) > MAX_NEST_DEV
)
7340 if (__netdev_has_upper_dev(dev
, upper_dev
))
7343 master_dev
= __netdev_master_upper_dev_get(dev
);
7345 return master_dev
== upper_dev
? -EEXIST
: -EBUSY
;
7348 ret
= call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
7349 &changeupper_info
.info
);
7350 ret
= notifier_to_errno(ret
);
7354 ret
= __netdev_adjacent_dev_link_neighbour(dev
, upper_dev
, upper_priv
,
7359 ret
= call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
7360 &changeupper_info
.info
);
7361 ret
= notifier_to_errno(ret
);
7365 __netdev_update_upper_level(dev
, NULL
);
7366 __netdev_walk_all_lower_dev(dev
, __netdev_update_upper_level
, NULL
);
7368 __netdev_update_lower_level(upper_dev
, NULL
);
7369 __netdev_walk_all_upper_dev(upper_dev
, __netdev_update_lower_level
,
7375 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
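/* Usage sketch (illustrative, not part of this file): a stacking driver
 * pairs link and unlink under RTNL, with the lower device passed first
 * and the upper device second. The names below are hypothetical.
 *
 *	ASSERT_RTNL();
 *	err = netdev_upper_dev_link(slave_dev, master_dev, extack);
 *	if (err)
 *		return err;	// no adjacency was created
 *	...
 *	netdev_upper_dev_unlink(slave_dev, master_dev);	// teardown path
 */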
/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, extack);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
		},
		.upper_dev = upper_dev,
		.linking = false,
	};

	ASSERT_RTNL();

	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
				      &changeupper_info.info);

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, NULL);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
				    NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
				      struct net_device *lower_dev,
				      bool val)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
	if (adj)
		adj->ignore = val;

	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
	if (adj)
		adj->ignore = val;
}

static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
					struct net_device *lower_dev)
{
	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
}

static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
				       struct net_device *lower_dev)
{
	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
}

int netdev_adjacent_change_prepare(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev,
				   struct netlink_ext_ack *extack)
{
	int err;

	if (!new_dev)
		return 0;

	if (old_dev && new_dev != old_dev)
		netdev_adjacent_dev_disable(dev, old_dev);

	err = netdev_upper_dev_link(new_dev, dev, extack);
	if (err) {
		if (old_dev && new_dev != old_dev)
			netdev_adjacent_dev_enable(dev, old_dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_adjacent_change_prepare);

void netdev_adjacent_change_commit(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev)
{
	if (!new_dev || !old_dev)
		return;

	if (new_dev == old_dev)
		return;

	netdev_adjacent_dev_enable(dev, old_dev);
	netdev_upper_dev_unlink(old_dev, dev);
}
EXPORT_SYMBOL(netdev_adjacent_change_commit);

void netdev_adjacent_change_abort(struct net_device *old_dev,
				  struct net_device *new_dev,
				  struct net_device *dev)
{
	if (!new_dev)
		return;

	if (old_dev && new_dev != old_dev)
		netdev_adjacent_dev_enable(dev, old_dev);

	netdev_upper_dev_unlink(new_dev, dev);
}
EXPORT_SYMBOL(netdev_adjacent_change_abort);

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info = {
		.info.dev = dev,
	};

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);
/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
		.info.dev = lower_dev,
	};

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(audit_context(), GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}
/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
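/* Usage sketch (illustrative): promiscuity is a counter, not a flag, so a
 * capture-style user takes one reference while active and drops it
 * symmetrically; both calls run under RTNL. Error handling is an assumption.
 *
 *	err = dev_set_promiscuity(dev, 1);	// +1: may enter promisc mode
 *	if (err)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);		// -1: may leave promisc mode
 */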
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}
/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast addresses. Once it hits zero the device reverts back
 * to normal filtering operation. A negative @inc value is used to drop
 * the counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {
		if (old_flags & IFF_UP)
			__dev_close(dev);
		else
			ret = __dev_open(dev, extack);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC, when
	 * IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info = {
			.info = {
				.dev = dev,
			},
			.flags_changed = changes,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
	}
}
/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 * @extack: netlink extended ack
 *
 * Change settings on device based state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags, extack);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
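/* Usage sketch (illustrative): bringing an interface up the way an
 * ioctl/netlink handler would, by OR-ing IFF_UP into the flags in their
 * userspace-exported format; "extack" is assumed to come from the caller.
 *
 *	unsigned int flags = dev_get_flags(dev) | IFF_UP;
 *
 *	err = dev_change_flags(dev, flags, extack);
 */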
int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	/* Pairs with all the lockless reads of dev->mtu in the stack */
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

int dev_validate_mtu(struct net_device *dev, int new_mtu,
		     struct netlink_ext_ack *extack)
{
	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}
	return 0;
}

/**
 * dev_set_mtu_ext - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 * @extack: netlink extended ack
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	err = dev_validate_mtu(dev, new_mtu, extack);
	if (err)
		return err;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						   orig_mtu);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						     new_mtu);
		}
	}
	return err;
}

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	struct netlink_ext_ack extack;
	int err;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
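/* Usage sketch (illustrative): switching a device to jumbo frames under
 * RTNL; 9000 is an arbitrary example value and is still checked against
 * dev->min_mtu/dev->max_mtu by dev_validate_mtu() above.
 *
 *	err = dev_set_mtu(dev, 9000);
 *	if (err)
 *		...	// out-of-range MTU or a notifier vetoed the change
 */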
/**
 * dev_change_tx_queue_len - Change TX queue length of a netdevice
 * @dev: device
 * @new_len: new tx queue length
 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)
		return -ERANGE;

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res)
			goto err_rollback;
		res = dev_qdisc_change_tx_queue_len(dev);
		if (res)
			goto err_rollback;
	}

	return 0;

err_rollback:
	netdev_err(dev, "refused to change device tx_queue_len\n");
	dev->tx_queue_len = orig_len;
	return res;
}

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 * @dev: device
 * @addr: new address
 * @extack: netlink extended ack
 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
{
	struct netdev_notifier_pre_changeaddr_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.dev_addr = addr,
	};
	int rc;

	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
	return notifier_to_errno(rc);
}
EXPORT_SYMBOL(dev_pre_changeaddr_notify);
/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 * @extack: netlink extended ack
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
	if (err)
		return err;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
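/* Usage sketch (illustrative): changing a MAC address; the sockaddr
 * family must match dev->type or -EINVAL is returned. "new_addr" with
 * dev->addr_len valid bytes is an assumption.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa, extack);
 */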
static DECLARE_RWSEM(dev_addr_sem);

int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack)
{
	int ret;

	down_write(&dev_addr_sem);
	ret = dev_set_mac_address(dev, sa, extack);
	up_write(&dev_addr_sem);
	return ret;
}
EXPORT_SYMBOL(dev_set_mac_address_user);

int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{
	size_t size = sizeof(sa->sa_data);
	struct net_device *dev;
	int ret = 0;

	down_read(&dev_addr_sem);
	rcu_read_lock();

	dev = dev_get_by_name_rcu(net, dev_name);
	if (!dev) {
		ret = -ENODEV;
		goto unlock;
	}
	if (!dev->addr_len)
		memset(sa->sa_data, 0, size);
	else
		memcpy(sa->sa_data, dev->dev_addr,
		       min_t(size_t, size, dev->addr_len));
	sa->sa_family = dev->type;

unlock:
	rcu_read_unlock();
	up_read(&dev_addr_sem);
	return ret;
}
EXPORT_SYMBOL(dev_get_mac_address);

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (ops->ndo_get_phys_port_name) {
		err = ops->ndo_get_phys_port_name(dev, name, len);
		if (err != -EOPNOTSUPP)
			return err;
	}
	return devlink_compat_phys_port_name_get(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
 * dev_get_port_parent_id - Get the device's port parent identifier
 * @dev: network device
 * @ppid: pointer to a storage for the port's parent identifier
 * @recurse: allow/disallow recursion to lower devices
 *
 * Get the device's port parent identifier
 */
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid,
			   bool recurse)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netdev_phys_item_id first = { };
	struct net_device *lower_dev;
	struct list_head *iter;
	int err;

	if (ops->ndo_get_port_parent_id) {
		err = ops->ndo_get_port_parent_id(dev, ppid);
		if (err != -EOPNOTSUPP)
			return err;
	}

	err = devlink_compat_switch_id_get(dev, ppid);
	if (!err || err != -EOPNOTSUPP)
		return err;

	if (!recurse)
		return -EOPNOTSUPP;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
		if (err)
			break;
		if (!first.id_len)
			first = *ppid;
		else if (memcmp(&first, ppid, sizeof(*ppid)))
			return -EOPNOTSUPP;
	}

	return err;
}
EXPORT_SYMBOL(dev_get_port_parent_id);
/**
 * netdev_port_same_parent_id - Indicate if two network devices have
 * the same port parent identifier
 * @a: first network device
 * @b: second network device
 */
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id a_id = { };
	struct netdev_phys_item_id b_id = { };

	if (dev_get_port_parent_id(a, &a_id, true) ||
	    dev_get_port_parent_id(b, &b_id, true))
		return false;

	return netdev_phys_item_id_same(&a_id, &b_id);
}
EXPORT_SYMBOL(netdev_port_same_parent_id);
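/* Usage sketch (illustrative): a switchdev-aware caller might use this
 * to test whether two ports sit behind the same switch ASIC before
 * offloading a bond or bridge that spans them.
 *
 *	if (netdev_port_same_parent_id(port_a, port_b))
 *		...	// same parent: hardware offload is plausible
 */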
/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

/**
 * dev_change_proto_down_generic - generic implementation for
 * ndo_change_proto_down that sets carrier according to
 * proto_down.
 *
 * @dev: device
 * @proto_down: new value
 */
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
{
	if (proto_down)
		netif_carrier_off(dev);
	else
		netif_carrier_on(dev);
	dev->proto_down = proto_down;
	return 0;
}
EXPORT_SYMBOL(dev_change_proto_down_generic);
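/* Usage sketch (illustrative): a driver with no device-specific
 * proto_down handling can point its ndo at this generic helper, which
 * simply mirrors the proto_down value onto the carrier state.
 * "example_netdev_ops" is a hypothetical name.
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		...
 *		.ndo_change_proto_down	= dev_change_proto_down_generic,
 *	};
 */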
u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
		    enum bpf_netdev_command cmd)
{
	struct netdev_bpf xdp;

	if (!bpf_op)
		return 0;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = cmd;

	/* Query must always succeed. */
	WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);

	return xdp.prog_id;
}

static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
			   struct netlink_ext_ack *extack, u32 flags,
			   struct bpf_prog *prog)
{
	struct netdev_bpf xdp;

	memset(&xdp, 0, sizeof(xdp));
	if (flags & XDP_FLAGS_HW_MODE)
		xdp.command = XDP_SETUP_PROG_HW;
	else
		xdp.command = XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	return bpf_op(dev, &xdp);
}

static void dev_xdp_uninstall(struct net_device *dev)
{
	struct netdev_bpf xdp;
	bpf_op_t ndo_bpf;

	/* Remove generic XDP */
	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));

	/* Remove from the driver */
	ndo_bpf = dev->netdev_ops->ndo_bpf;
	if (!ndo_bpf)
		return;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG;
	WARN_ON(ndo_bpf(dev, &xdp));
	if (xdp.prog_id)
		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
					NULL));

	/* Remove HW offload */
	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG_HW;
	if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
					NULL));
}
/**
 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
 * @dev: device
 * @extack: netlink extended ack
 * @fd: new program fd or negative value to clear
 * @flags: xdp-related flags
 *
 * Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	enum bpf_netdev_command query;
	struct bpf_prog *prog = NULL;
	bpf_op_t bpf_op, bpf_chk;
	bool offload;
	int err;

	ASSERT_RTNL();

	offload = flags & XDP_FLAGS_HW_MODE;
	query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;

	bpf_op = bpf_chk = ops->ndo_bpf;
	if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
		NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
		return -EOPNOTSUPP;
	}
	if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
		bpf_op = generic_xdp_install;
	if (bpf_op == bpf_chk)
		bpf_chk = generic_xdp_install;

	if (fd >= 0) {
		u32 prog_id;

		if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
			NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
			return -EEXIST;
		}

		prog_id = __dev_xdp_query(dev, bpf_op, query);
		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
			NL_SET_ERR_MSG(extack, "XDP program already attached");
			return -EBUSY;
		}

		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
					     bpf_op == ops->ndo_bpf);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
			NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
			bpf_prog_put(prog);
			return -EINVAL;
		}

		/* prog->aux->id may be 0 for orphaned device-bound progs */
		if (prog->aux->id && prog->aux->id == prog_id) {
			bpf_prog_put(prog);
			return 0;
		}
	} else {
		if (!__dev_xdp_query(dev, bpf_op, query))
			return 0;
	}

	err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
	if (err < 0 && prog)
		bpf_prog_put(prog);

	return err;
}
/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		dev_xdp_uninstall(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			__netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
			else
				netdev_features_change(lower);
		}
	}
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
					!(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
					 !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware. If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
		features &= ~NETIF_F_HW_TLS_RX;
	}

	return features;
}
int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}
/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
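/* Usage sketch (illustrative): after a driver changes what the hardware
 * can offload (here dropping RX checksumming after an assumed firmware
 * event), it re-runs feature resolution so stacked devices resync.
 *
 *	dev->hw_features &= ~NETIF_F_RXCSUM;
 *	netdev_update_features(dev);	// recompute and notify if changed
 */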
/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);
	int err = 0;

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++) {
		rx[i].dev = dev;

		/* XDP RX-queue setup */
		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
		if (err < 0)
			goto err_rxq_info;
	}
	return 0;

err_rxq_info:
	/* Rollback successful reg's and free other resources */
	while (i--)
		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
	kvfree(dev->_rx);
	dev->_rx = NULL;
	return err;
}

static void netif_free_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;

	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
	if (!dev->_rx)
		return;

	for (i = 0; i < count; i++)
		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

	kvfree(dev->_rx);
}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);

static void netdev_register_lockdep_key(struct net_device *dev)
{
	lockdep_register_key(&dev->qdisc_tx_busylock_key);
	lockdep_register_key(&dev->qdisc_running_key);
	lockdep_register_key(&dev->qdisc_xmit_lock_key);
	lockdep_register_key(&dev->addr_list_lock_key);
}

static void netdev_unregister_lockdep_key(struct net_device *dev)
{
	lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
	lockdep_unregister_key(&dev->qdisc_running_key);
	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
	lockdep_unregister_key(&dev->addr_list_lock_key);
}

void netdev_update_lockdep_key(struct net_device *dev)
{
	lockdep_unregister_key(&dev->addr_list_lock_key);
	lockdep_register_key(&dev->addr_list_lock_key);

	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
}
EXPORT_SYMBOL(netdev_update_lockdep_key);
/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
		     NETDEV_FEATURE_COUNT);
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->netdev_ops->ndo_udp_tunnel_add) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value. This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret) {
		dev->reg_state = NETREG_UNREGISTERED;
		goto err_uninit;
	}
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		rcu_barrier();

		dev->reg_state = NETREG_UNREGISTERED;
		/* We should put the kobject that hold in
		 * netdev_unregister_kobject(), otherwise
		 * the net device cannot be freed when
		 * driver calls free_netdev(), because the
		 * kobject is being hold.
		 */
		kobject_put(&dev->dev.kobj);
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
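/* Usage sketch (illustrative): a driver with one PCI function but
 * several hardware channels can hang all of its NAPI contexts off a
 * single dummy netdev. "priv" and "example_poll" are hypothetical.
 *
 *	init_dummy_netdev(&priv->napi_dev);
 *	netif_napi_add(&priv->napi_dev, &priv->napi, example_poll,
 *		       NAPI_POLL_WEIGHT);
 *	napi_enable(&priv->napi);
 */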
/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice() that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev().
 */
int register_netdev(struct net_device *dev)
{
	int err;

	if (rtnl_lock_killable())
		return -EINTR;
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
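/* Usage sketch (illustrative): the canonical probe/remove flow around
 * this wrapper; "struct example_priv" and "example_setup" are
 * hypothetical driver pieces.
 *
 *	dev = alloc_netdev(sizeof(struct example_priv), "eth%d",
 *			   NET_NAME_ENUM, example_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);	// takes rtnl, expands "eth%d"
 *	if (err)
 *		free_netdev(dev);
 *	...
 *	unregister_netdev(dev);		// on remove
 *	free_netdev(dev);
 */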
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
#if IS_ENABLED(CONFIG_DECNET)
		WARN_ON(dev->dn_ptr);
#endif
		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + sizeof(*netdev_stats), 0,
	       sizeof(*stats64) - sizeof(*netdev_stats));
#else
	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
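/* Usage sketch (illustrative): a driver-side ndo_get_stats64. The core
 * zeroes @storage before the call (see above), so a driver only fills
 * the counters it tracks. "example_priv" is hypothetical.
 *
 *	static void example_get_stats64(struct net_device *dev,
 *					struct rtnl_link_stats64 *storage)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *
 *		storage->rx_packets = priv->rx_packets;
 *		storage->tx_packets = priv->tx_packets;
 *	}
 */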
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}
static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	unsigned int alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	netdev_register_lockdep_key(dev);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->upper_level = 1;
	dev->lower_level = 1;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
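/* Illustrative sketch (not part of the original file): the usual
 * allocate/register/cleanup pattern built on the helpers above. The setup
 * callback, the name template and the queue counts are hypothetical.
 */
#if 0
static void my_setup(struct net_device *dev)
{
	ether_setup(dev);	/* sane Ethernet defaults */
}

static struct net_device *my_example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mqs(0, "example%d", NET_NAME_ENUM, my_setup, 4, 4);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		/* never registered: plain free_netdev() is the right cleanup */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}
#endif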
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released. If this
 *	is the last reference then it will be freed. Must be called in process
 *	context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();
	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	netdev_unregister_lockdep_key(dev);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head is not NULL, the device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *	Note: As most callers use a stack allocated list_head,
 *	we force a list_del() to make sure the stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
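/* Illustrative sketch (not part of the original file): batching several
 * unregistrations through one call, as the note above suggests. dev1/dev2
 * are assumed to be registered devices and RTNL must already be held.
 */
#if 0
static void my_example_kill_two(struct net_device *dev1,
				struct net_device *dev2)
{
	LIST_HEAD(kill_list);	/* on the stack, hence the list_del() above */

	unregister_netdevice_queue(dev1, &kill_list);
	unregister_netdevice_queue(dev2, &kill_list);
	unregister_netdevice_many(&kill_list);
}
#endif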
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore. In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
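/* Illustrative sketch (not part of the original file): a typical driver
 * remove path. For devices without needs_free_netdev, free_netdev() follows
 * once the unregister has completed.
 */
#if 0
static void my_example_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes and releases RTNL itself */
	free_netdev(dev);
}
#endif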
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	struct net *net_old = dev_net(dev);
	int err, new_nsid, new_ifindex;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(net_old, net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_get_valid_name(net, dev, pat);
		if (err < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice() and unregister_netdevice().
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		new_ifindex = dev_new_index(net);
	else
		new_ifindex = dev->ifindex;

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Adapt owner in case owning user namespace of target network
	 * namespace is different from the original one.
	 */
	err = netdev_change_owner(dev, net_old, net);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
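/* Illustrative sketch (not part of the original file): moving a device into
 * another namespace under RTNL, falling back to a "moved%d" name pattern if
 * its current name is taken there. target_net is an assumed valid struct net.
 */
#if 0
static int my_example_move(struct net_device *dev, struct net *target_net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target_net, "moved%d");
	rtnl_unlock();
	return err;
}
#endif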
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
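/* Illustrative sketch (not part of the original file): how a master driver
 * might fold two lower devices' feature sets into its own, in the spirit of
 * the bonding/team fix_features callbacks. All names are hypothetical.
 */
#if 0
static netdev_features_t my_example_merge(struct net_device *lower1,
					  struct net_device *lower2,
					  netdev_features_t mask)
{
	/* start from everything the master could offer, then fold in */
	netdev_features_t all = (mask & ~NETIF_F_ONE_FOR_ALL) |
				NETIF_F_ALL_FOR_ALL;

	all = netdev_increment_features(all, lower1->features, mask);
	all = netdev_increment_features(all, lower2->features, mask);
	return all;
}
#endif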
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));

	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
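/* Usage note (illustrative, not part of the original file): the generated
 * helpers behave like printk() but prefix driver, bus and interface
 * information, e.g.:
 *
 *	netdev_warn(dev, "link down, carrier lost %d times\n", cnt);
 */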
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (__dev_get_by_name(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);