// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *				to 2 if register_netdev gets called
 *				before net_dev_init & also removed a
 *				few lines of code in the process.
 *	Alan Cox	:	device private ioctl copies fields back.
 *	Alan Cox	:	Transmit queue code does relevant
 *				stunts to keep the queue safe.
 *	Alan Cox	:	Fixed double lock.
 *	Alan Cox	:	Fixed promisc NULL pointer trap
 *	????????	:	Support the full private ioctl range
 *	Alan Cox	:	Moved ioctl permission check into
 *				drivers
 *	Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *	Alan Cox	:	100 backlog just doesn't cut it when
 *				you start doing multicast video 8)
 *	Alan Cox	:	Rewrote net_bh and list manager.
 *	Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *	Alan Cox	:	Took out transmit every packet pass
 *				Saved a few bytes in the ioctl handler
 *	Alan Cox	:	Network driver sets packet type before
 *				calling netif_rx. Saves a function
 *				call.
 *	Alan Cox	:	Hashed net_bh()
 *	Richard Kooijman:	Timestamp fixes.
 *	Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *	Alan Cox	:	Device lock protection.
 *	Alan Cox	:	Fixed nasty side effect of device close
 *				changes.
 *	Rudi Cilibrasi	:	Pass the right thing to
 *				set_mac_address()
 *	Dave Miller	:	32bit quantity for the device lock to
 *				make it work out on a Sparc.
 *	Bjorn Ekwall	:	Added KERNELD hack.
 *	Alan Cox	:	Cleaned up the backlog initialise.
 *	Craig Metz	:	SIOCGIFCONF fix if space for under
 *				minimum space.
 *	Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *				is no device open function.
 *	Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *	Cyrus Durgin	:	Cleaned for KMOD
 *	Adam Sulmicki	:	Bug Fix : Network Device Unload
 *				A network device unload needs to purge
 *				the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *	Pekka Riikonen	:	Netdev boot-time settings code
 *	Andrew Morton	:	Make unregister_netdevice wait
 *				indefinitely on dev->refcnt
 *	J Hadi Salim	:	- Backlog queue sampling
 *				- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}
static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
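
/*
 * Illustrative sketch (not part of the original file): a minimal packet tap
 * registered with dev_add_pack() and torn down with dev_remove_pack().  The
 * EtherType 0x88b5 (IEEE 802.1 local experimental) and all function names
 * below are assumptions chosen for the example, and the block is kept out of
 * the build.
 */
#if 0
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* The handler owns this reference; drop it once we are done. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_tap __read_mostly = {
	.type	= cpu_to_be16(0x88b5),
	.func	= example_tap_rcv,
};

static void example_tap_attach(void)
{
	dev_add_pack(&example_tap);	/* non-sleeping, takes ptype_lock */
}

static void example_tap_detach(void)
{
	dev_remove_pack(&example_tap);	/* sleeps until no CPU sees the tap */
}
#endif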
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack		 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
		.daddr	= daddr,
	};
	struct net_device_path *path;
	int ret = 0;

	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}
	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu	- find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
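
/*
 * Illustrative sketch (not part of the original file): the refcounted lookup
 * pattern described above.  The device name "eth0" and the helper name are
 * assumptions for the example, and the block is kept out of the build.
 */
#if 0
static int example_lookup_mtu(struct net *net)
{
	struct net_device *dev;
	int mtu;

	dev = dev_get_by_name(net, "eth0");	/* takes a reference */
	if (!dev)
		return -ENODEV;
	mtu = dev->mtu;
	dev_put(dev);				/* release it when done */
	return mtu;
}
#endif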
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			struct netdev_name_node *name_node;
			list_for_each_entry(name_node, &d->name_node->list, list) {
				if (!sscanf(name_node->name, name, &i))
					continue;
				if (i < 0 || i >= max_netdevices)
					continue;

				/*  avoid cases where sscanf is not exact inverse of printf */
				snprintf(buf, IFNAMSIZ, name, i);
				if (!strncmp(buf, name_node->name, IFNAMSIZ))
					set_bit(i, inuse);
			}
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
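
/*
 * Illustrative sketch (not part of the original file): how a driver typically
 * requests a templated name before registration.  The "foo%d" template and
 * the helper name are assumptions for the example; the block is not built.
 */
#if 0
static int example_pick_name(struct net_device *dev)
{
	int unit;

	/* Scans the namespace and fills dev->name with "foo0", "foo1", ... */
	unit = dev_alloc_name(dev, "foo%d");
	if (unit < 0)
		return unit;	/* -EINVAL or -ENFILE */
	return 0;
}
#endif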
static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Some auto-enslaved devices e.g. failover slaves are
	 * special, as userspace might rename the device after
	 * the interface had been brought up and running since
	 * the point kernel initiated auto-enslavement. Allow
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name change, as
	 * they are supposed to operate on master interface
	 * directly.
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}
static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
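
/*
 * Illustrative sketch (not part of the original file): bringing an interface
 * up and back down from kernel code.  Both calls must run under RTNL; the
 * helper name is an assumption for the example and the block is not built.
 */
#if 0
static int example_cycle_device(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev, NULL);	/* NULL extack: no extended ack */
	if (!err)
		dev_close(dev);		/* dev_close() cannot fail */
	rtnl_unlock();
	return err;
}
#endif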
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of it's
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
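
/*
 * Illustrative sketch (not part of the original file): a forwarding setup
 * path that turns LRO off on a port it enslaves, as required by the comment
 * above.  The function name is an assumption for the example; not built.
 */
#if 0
static void example_prepare_forwarding_port(struct net_device *port)
{
	ASSERT_RTNL();
	/* Recursively clears NETIF_F_LRO on the port and its lower devices. */
	dev_disable_lro(port);
}
#endif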
/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						  struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						     struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;
/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
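
/*
 * Illustrative sketch (not part of the original file): a minimal netdevice
 * notifier.  Registration replays NETDEV_REGISTER/NETDEV_UP for existing
 * devices, so the callback sees the whole device list.  All names below are
 * assumptions for the example and the block is kept out of the build.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

static int __init example_nb_init(void)
{
	return register_netdevice_notifier(&example_netdev_nb);
}
#endif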
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net)
		call_netdevice_unregister_net_notifiers(nb, net);

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
{
	int err;

	err = raw_notifier_chain_register(&net->netdev_chain, nb);
	if (err)
		return err;
	if (dev_boot_phase)
		return 0;

	err = call_netdevice_register_net_notifiers(nb, net);
	if (err && !ignore_call_fail)
		goto chain_unregister;

	return 0;

chain_unregister:
	raw_notifier_chain_unregister(&net->netdev_chain, nb);
	return err;
}

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
{
	int err;

	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
	if (err)
		return err;

	call_netdevice_unregister_net_notifiers(nb, net);
	return 0;
}

/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(net, nb, false);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);
/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);
int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
	if (!err) {
		nn->nb = nb;
		list_add(&nn->list, &dev->net_notifier_list);
	}
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	list_del(&nn->list);
	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);

static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
{
	struct netdev_net_notifier *nn;

	list_for_each_entry(nn, &dev->net_notifier_list, list) {
		__unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
		__register_netdevice_notifier_net(net, nn->nb, true);
	}
}
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);
	int ret;

	ASSERT_RTNL();

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
	 */
	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
	if (ret & NOTIFY_STOP_MASK)
		return ret;
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_info info = {
		.dev = dev,
		.extack = extack,
	};

	return call_netdevice_notifiers_info(val, &info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

/**
 *	call_netdevice_notifiers_mtu - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@arg: additional u32 argument passed to the notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
{
	struct netdev_notifier_info_ext info = {
		.info.dev = dev,
		.ext.mtu = arg,
	};

	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

	return call_netdevice_notifiers_info(val, &info.info);
}
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_branch_unlikely(&netstamp_needed_key))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	return __is_skb_forwardable(dev, skb, true);
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
			      bool check_mtu)
{
	int ret = ____dev_forward_skb(dev, skb, check_mtu);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb2(dev, skb, true);
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
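
/*
 * Illustrative sketch (not part of the original file): a virtual driver's
 * transmit routine looping frames into a peer device with dev_forward_skb(),
 * in the spirit of the comment above.  The private structure, peer lookup and
 * function names are assumptions for the example; the block is not built.
 */
#if 0
struct example_pair_priv {
	struct net_device __rcu *peer;
};

static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct example_pair_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (!peer) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
	} else if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS) {
		dev->stats.tx_dropped++;	/* skb already freed */
	}
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
#endif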
2200 int dev_forward_skb_nomtu(struct net_device
*dev
, struct sk_buff
*skb
)
2202 return __dev_forward_skb2(dev
, skb
, false) ?: netif_rx_internal(skb
);
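/* Illustrative sketch (not part of this file): a virtual driver in the style
 * of a veth-like pair can hand a frame built in its ndo_start_xmit() straight
 * to the peer's receive path with dev_forward_skb().  The peer pointer and
 * the toy_priv layout below are hypothetical.
 *
 *	static netdev_tx_t toy_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct toy_priv *priv = netdev_priv(dev);
 *		struct net_device *peer;
 *
 *		rcu_read_lock();
 *		peer = rcu_dereference(priv->peer);
 *		if (!peer)
 *			kfree_skb(skb);
 *		else if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		rcu_read_unlock();
 *		return NETDEV_TX_OK;
 *	}
 */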
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{
	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->ignore_outgoing)
			continue;

		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/*
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		/* walk through the TCs and see if it falls into any of them */
		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		/* didn't find it, just return -1 to indicate no match */
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
#ifdef CONFIG_XPS
static struct static_key xps_needed __read_mostly;
static struct static_key xps_rxqs_needed __read_mostly;
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     struct xps_dev_maps *old_maps, int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		if (old_maps)
			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
2423 static bool remove_xps_queue_cpu(struct net_device
*dev
,
2424 struct xps_dev_maps
*dev_maps
,
2425 int cpu
, u16 offset
, u16 count
)
2427 int num_tc
= dev_maps
->num_tc
;
2428 bool active
= false;
2431 for (tci
= cpu
* num_tc
; num_tc
--; tci
++) {
2434 for (i
= count
, j
= offset
; i
--; j
++) {
2435 if (!remove_xps_queue(dev_maps
, NULL
, tci
, j
))
2445 static void reset_xps_maps(struct net_device
*dev
,
2446 struct xps_dev_maps
*dev_maps
,
2447 enum xps_map_type type
)
2449 static_key_slow_dec_cpuslocked(&xps_needed
);
2450 if (type
== XPS_RXQS
)
2451 static_key_slow_dec_cpuslocked(&xps_rxqs_needed
);
2453 RCU_INIT_POINTER(dev
->xps_maps
[type
], NULL
);
2455 kfree_rcu(dev_maps
, rcu
);
2458 static void clean_xps_maps(struct net_device
*dev
, enum xps_map_type type
,
2459 u16 offset
, u16 count
)
2461 struct xps_dev_maps
*dev_maps
;
2462 bool active
= false;
2465 dev_maps
= xmap_dereference(dev
->xps_maps
[type
]);
2469 for (j
= 0; j
< dev_maps
->nr_ids
; j
++)
2470 active
|= remove_xps_queue_cpu(dev
, dev_maps
, j
, offset
, count
);
2472 reset_xps_maps(dev
, dev_maps
, type
);
2474 if (type
== XPS_CPUS
) {
2475 for (i
= offset
+ (count
- 1); count
--; i
--)
2476 netdev_queue_numa_node_write(
2477 netdev_get_tx_queue(dev
, i
), NUMA_NO_NODE
);
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	if (!static_key_false(&xps_needed))
		return;

	cpus_read_lock();
	mutex_lock(&xps_map_mutex);

	if (static_key_false(&xps_rxqs_needed))
		clean_xps_maps(dev, XPS_RXQS, offset, count);

	clean_xps_maps(dev, XPS_CPUS, offset, count);

	mutex_unlock(&xps_map_mutex);
	cpus_read_unlock();
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
2504 static struct xps_map
*expand_xps_map(struct xps_map
*map
, int attr_index
,
2505 u16 index
, bool is_rxqs_map
)
2507 struct xps_map
*new_map
;
2508 int alloc_len
= XPS_MIN_MAP_ALLOC
;
2511 for (pos
= 0; map
&& pos
< map
->len
; pos
++) {
2512 if (map
->queues
[pos
] != index
)
2517 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2519 if (pos
< map
->alloc_len
)
2522 alloc_len
= map
->alloc_len
* 2;
2525 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2529 new_map
= kzalloc(XPS_MAP_SIZE(alloc_len
), GFP_KERNEL
);
2531 new_map
= kzalloc_node(XPS_MAP_SIZE(alloc_len
), GFP_KERNEL
,
2532 cpu_to_node(attr_index
));
2536 for (i
= 0; i
< pos
; i
++)
2537 new_map
->queues
[i
] = map
->queues
[i
];
2538 new_map
->alloc_len
= alloc_len
;
2544 /* Copy xps maps at a given index */
2545 static void xps_copy_dev_maps(struct xps_dev_maps
*dev_maps
,
2546 struct xps_dev_maps
*new_dev_maps
, int index
,
2547 int tc
, bool skip_tc
)
2549 int i
, tci
= index
* dev_maps
->num_tc
;
2550 struct xps_map
*map
;
2552 /* copy maps belonging to foreign traffic classes */
2553 for (i
= 0; i
< dev_maps
->num_tc
; i
++, tci
++) {
2554 if (i
== tc
&& skip_tc
)
2557 /* fill in the new device map from the old device map */
2558 map
= xmap_dereference(dev_maps
->attr_map
[tci
]);
2559 RCU_INIT_POINTER(new_dev_maps
->attr_map
[tci
], map
);
2563 /* Must be called under cpus_read_lock */
2564 int __netif_set_xps_queue(struct net_device
*dev
, const unsigned long *mask
,
2565 u16 index
, enum xps_map_type type
)
2567 struct xps_dev_maps
*dev_maps
, *new_dev_maps
= NULL
, *old_dev_maps
= NULL
;
2568 const unsigned long *online_mask
= NULL
;
2569 bool active
= false, copy
= false;
2570 int i
, j
, tci
, numa_node_id
= -2;
2571 int maps_sz
, num_tc
= 1, tc
= 0;
2572 struct xps_map
*map
, *new_map
;
2573 unsigned int nr_ids
;
2576 /* Do not allow XPS on subordinate device directly */
2577 num_tc
= dev
->num_tc
;
2581 /* If queue belongs to subordinate dev use its map */
2582 dev
= netdev_get_tx_queue(dev
, index
)->sb_dev
? : dev
;
2584 tc
= netdev_txq_to_tc(dev
, index
);
2589 mutex_lock(&xps_map_mutex
);
2591 dev_maps
= xmap_dereference(dev
->xps_maps
[type
]);
2592 if (type
== XPS_RXQS
) {
2593 maps_sz
= XPS_RXQ_DEV_MAPS_SIZE(num_tc
, dev
->num_rx_queues
);
2594 nr_ids
= dev
->num_rx_queues
;
2596 maps_sz
= XPS_CPU_DEV_MAPS_SIZE(num_tc
);
2597 if (num_possible_cpus() > 1)
2598 online_mask
= cpumask_bits(cpu_online_mask
);
2599 nr_ids
= nr_cpu_ids
;
2602 if (maps_sz
< L1_CACHE_BYTES
)
2603 maps_sz
= L1_CACHE_BYTES
;
2605 /* The old dev_maps could be larger or smaller than the one we're
2606 * setting up now, as dev->num_tc or nr_ids could have been updated in
2607 * between. We could try to be smart, but let's be safe instead and only
2608 * copy foreign traffic classes if the two map sizes match.
2611 dev_maps
->num_tc
== num_tc
&& dev_maps
->nr_ids
== nr_ids
)
2614 /* allocate memory for queue storage */
2615 for (j
= -1; j
= netif_attrmask_next_and(j
, online_mask
, mask
, nr_ids
),
2617 if (!new_dev_maps
) {
2618 new_dev_maps
= kzalloc(maps_sz
, GFP_KERNEL
);
2619 if (!new_dev_maps
) {
2620 mutex_unlock(&xps_map_mutex
);
2624 new_dev_maps
->nr_ids
= nr_ids
;
2625 new_dev_maps
->num_tc
= num_tc
;
2628 tci
= j
* num_tc
+ tc
;
2629 map
= copy
? xmap_dereference(dev_maps
->attr_map
[tci
]) : NULL
;
2631 map
= expand_xps_map(map
, j
, index
, type
== XPS_RXQS
);
2635 RCU_INIT_POINTER(new_dev_maps
->attr_map
[tci
], map
);
2639 goto out_no_new_maps
;
2642 /* Increment static keys at most once per type */
2643 static_key_slow_inc_cpuslocked(&xps_needed
);
2644 if (type
== XPS_RXQS
)
2645 static_key_slow_inc_cpuslocked(&xps_rxqs_needed
);
2648 for (j
= 0; j
< nr_ids
; j
++) {
2649 bool skip_tc
= false;
2651 tci
= j
* num_tc
+ tc
;
2652 if (netif_attr_test_mask(j
, mask
, nr_ids
) &&
2653 netif_attr_test_online(j
, online_mask
, nr_ids
)) {
2654 /* add tx-queue to CPU/rx-queue maps */
2659 map
= xmap_dereference(new_dev_maps
->attr_map
[tci
]);
2660 while ((pos
< map
->len
) && (map
->queues
[pos
] != index
))
2663 if (pos
== map
->len
)
2664 map
->queues
[map
->len
++] = index
;
2666 if (type
== XPS_CPUS
) {
2667 if (numa_node_id
== -2)
2668 numa_node_id
= cpu_to_node(j
);
2669 else if (numa_node_id
!= cpu_to_node(j
))
2676 xps_copy_dev_maps(dev_maps
, new_dev_maps
, j
, tc
,
2680 rcu_assign_pointer(dev
->xps_maps
[type
], new_dev_maps
);
2682 /* Cleanup old maps */
2684 goto out_no_old_maps
;
2686 for (j
= 0; j
< dev_maps
->nr_ids
; j
++) {
2687 for (i
= num_tc
, tci
= j
* dev_maps
->num_tc
; i
--; tci
++) {
2688 map
= xmap_dereference(dev_maps
->attr_map
[tci
]);
2693 new_map
= xmap_dereference(new_dev_maps
->attr_map
[tci
]);
2698 RCU_INIT_POINTER(dev_maps
->attr_map
[tci
], NULL
);
2699 kfree_rcu(map
, rcu
);
2703 old_dev_maps
= dev_maps
;
2706 dev_maps
= new_dev_maps
;
2710 if (type
== XPS_CPUS
)
2711 /* update Tx queue numa node */
2712 netdev_queue_numa_node_write(netdev_get_tx_queue(dev
, index
),
2713 (numa_node_id
>= 0) ?
2714 numa_node_id
: NUMA_NO_NODE
);
2719 /* removes tx-queue from unused CPUs/rx-queues */
2720 for (j
= 0; j
< dev_maps
->nr_ids
; j
++) {
2721 tci
= j
* dev_maps
->num_tc
;
2723 for (i
= 0; i
< dev_maps
->num_tc
; i
++, tci
++) {
2725 netif_attr_test_mask(j
, mask
, dev_maps
->nr_ids
) &&
2726 netif_attr_test_online(j
, online_mask
, dev_maps
->nr_ids
))
2729 active
|= remove_xps_queue(dev_maps
,
2730 copy
? old_dev_maps
: NULL
,
2736 kfree_rcu(old_dev_maps
, rcu
);
2738 /* free map if not active */
2740 reset_xps_maps(dev
, dev_maps
, type
);
2743 mutex_unlock(&xps_map_mutex
);
2747 /* remove any maps that we added */
2748 for (j
= 0; j
< nr_ids
; j
++) {
2749 for (i
= num_tc
, tci
= j
* num_tc
; i
--; tci
++) {
2750 new_map
= xmap_dereference(new_dev_maps
->attr_map
[tci
]);
2752 xmap_dereference(dev_maps
->attr_map
[tci
]) :
2754 if (new_map
&& new_map
!= map
)
2759 mutex_unlock(&xps_map_mutex
);
2761 kfree(new_dev_maps
);
2764 EXPORT_SYMBOL_GPL(__netif_set_xps_queue
);
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	int ret;

	cpus_read_lock();
	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_set_xps_queue);
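/* Illustrative sketch (not part of this file): a multiqueue driver may pin
 * each TX queue to the CPU it expects to service it, typically from its
 * ndo_open routine.  The one-queue-per-CPU loop below is a hypothetical
 * policy, not something this file mandates.
 *
 *	int i;
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()), i);
 */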
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

	/* Unbind any subordinate channels */
	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev)
			netdev_unbind_sb_channel(dev, txq->sb_dev);
	}
}

void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	/* Reset TC configuration of device */
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
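/* Illustrative sketch (not part of this file): a driver offloading an
 * mqprio-style configuration would typically declare the number of traffic
 * classes, describe which TX queue range backs each class, and map skb
 * priorities onto the classes.  The two-TC split over eight queues below
 * (queues 0-3 for TC0, queues 4-7 for TC1) is a hypothetical layout.
 *
 *	netdev_reset_tc(dev);
 *	if (netdev_set_num_tc(dev, 2))
 *		return -EINVAL;
 *	netdev_set_tc_queue(dev, 0, 4, 0);
 *	netdev_set_tc_queue(dev, 1, 4, 4);
 *	netdev_set_prio_tc_map(dev, 0, 0);
 *	netdev_set_prio_tc_map(dev, 1, 1);
 */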
2834 void netdev_unbind_sb_channel(struct net_device
*dev
,
2835 struct net_device
*sb_dev
)
2837 struct netdev_queue
*txq
= &dev
->_tx
[dev
->num_tx_queues
];
2840 netif_reset_xps_queues_gt(sb_dev
, 0);
2842 memset(sb_dev
->tc_to_txq
, 0, sizeof(sb_dev
->tc_to_txq
));
2843 memset(sb_dev
->prio_tc_map
, 0, sizeof(sb_dev
->prio_tc_map
));
2845 while (txq
-- != &dev
->_tx
[0]) {
2846 if (txq
->sb_dev
== sb_dev
)
2850 EXPORT_SYMBOL(netdev_unbind_sb_channel
);
2852 int netdev_bind_sb_channel_queue(struct net_device
*dev
,
2853 struct net_device
*sb_dev
,
2854 u8 tc
, u16 count
, u16 offset
)
2856 /* Make certain the sb_dev and dev are already configured */
2857 if (sb_dev
->num_tc
>= 0 || tc
>= dev
->num_tc
)
2860 /* We cannot hand out queues we don't have */
2861 if ((offset
+ count
) > dev
->real_num_tx_queues
)
2864 /* Record the mapping */
2865 sb_dev
->tc_to_txq
[tc
].count
= count
;
2866 sb_dev
->tc_to_txq
[tc
].offset
= offset
;
2868 /* Provide a way for Tx queue to find the tc_to_txq map or
2869 * XPS map for itself.
2872 netdev_get_tx_queue(dev
, count
+ offset
)->sb_dev
= sb_dev
;
2876 EXPORT_SYMBOL(netdev_bind_sb_channel_queue
);
int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{
	/* Do not use a multiqueue device to represent a subordinate channel */
	if (netif_is_multiqueue(dev))
		return -ENODEV;

	/* We allow channels 1 - 32767 to be used for subordinate channels.
	 * Channel 0 is meant to be "native" mode and used only to represent
	 * the main root device. We allow writing 0 to reset the device back
	 * to normal mode after being used as a subordinate channel.
	 */
	if (channel > S16_MAX)
		return -EINVAL;

	dev->num_tc = -channel;

	return 0;
}
EXPORT_SYMBOL(netdev_set_sb_channel);
2899 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2900 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
2902 int netif_set_real_num_tx_queues(struct net_device
*dev
, unsigned int txq
)
2907 disabling
= txq
< dev
->real_num_tx_queues
;
2909 if (txq
< 1 || txq
> dev
->num_tx_queues
)
2912 if (dev
->reg_state
== NETREG_REGISTERED
||
2913 dev
->reg_state
== NETREG_UNREGISTERING
) {
2916 rc
= netdev_queue_update_kobjects(dev
, dev
->real_num_tx_queues
,
2922 netif_setup_tc(dev
, txq
);
2924 dev_qdisc_change_real_num_tx(dev
, txq
);
2926 dev
->real_num_tx_queues
= txq
;
2930 qdisc_reset_all_tx_gt(dev
, txq
);
2932 netif_reset_xps_queues_gt(dev
, txq
);
2936 dev
->real_num_tx_queues
= txq
;
2941 EXPORT_SYMBOL(netif_set_real_num_tx_queues
);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 *	netif_set_real_num_queues - set actual number of RX and TX queues used
 *	@dev: Network device
 *	@txq: Actual number of TX queues
 *	@rxq: Actual number of RX queues
 *
 *	Set the real number of both TX and RX queues.
 *	Does nothing if the number of queues is already correct.
 */
int netif_set_real_num_queues(struct net_device *dev,
			      unsigned int txq, unsigned int rxq)
{
	unsigned int old_rxq = dev->real_num_rx_queues;
	int err;

	if (txq < 1 || txq > dev->num_tx_queues ||
	    rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	/* Start from increases, so the error path only does decreases -
	 * decreases can't fail.
	 */
	if (rxq > dev->real_num_rx_queues) {
		err = netif_set_real_num_rx_queues(dev, rxq);
		if (err)
			return err;
	}
	if (txq > dev->real_num_tx_queues) {
		err = netif_set_real_num_tx_queues(dev, txq);
		if (err)
			goto undo_rx;
	}
	if (rxq < dev->real_num_rx_queues)
		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
	if (txq < dev->real_num_tx_queues)
		WARN_ON(netif_set_real_num_tx_queues(dev, txq));

	return 0;
undo_rx:
	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
	return err;
}
EXPORT_SYMBOL(netif_set_real_num_queues);
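/* Illustrative sketch (not part of this file): a driver that registered with
 * the maximum queue count can shrink to the number of channels actually
 * brought up, for example from its ndo_open.  The toy_priv field and function
 * name below are hypothetical.
 *
 *	static int toy_open(struct net_device *dev)
 *	{
 *		struct toy_priv *priv = netdev_priv(dev);
 *		int err;
 *
 *		err = netif_set_real_num_queues(dev, priv->num_channels,
 *						priv->num_channels);
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */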
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!netif_xmit_stopped(txq)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_hardirq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
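/* Illustrative sketch (not part of this file): drivers commonly pair these
 * helpers with their power-management hooks so the stack stops queueing
 * packets while the hardware is down.  The function and variable names below
 * are hypothetical.
 *
 *	static int toy_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int toy_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */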
3152 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
3153 * to be used as a distribution range.
3155 static u16
skb_tx_hash(const struct net_device
*dev
,
3156 const struct net_device
*sb_dev
,
3157 struct sk_buff
*skb
)
3161 u16 qcount
= dev
->real_num_tx_queues
;
3164 u8 tc
= netdev_get_prio_tc_map(dev
, skb
->priority
);
3166 qoffset
= sb_dev
->tc_to_txq
[tc
].offset
;
3167 qcount
= sb_dev
->tc_to_txq
[tc
].count
;
3168 if (unlikely(!qcount
)) {
3169 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3170 sb_dev
->name
, qoffset
, tc
);
3172 qcount
= dev
->real_num_tx_queues
;
3176 if (skb_rx_queue_recorded(skb
)) {
3177 hash
= skb_get_rx_queue(skb
);
3178 if (hash
>= qoffset
)
3180 while (unlikely(hash
>= qcount
))
3182 return hash
+ qoffset
;
3185 return (u16
) reciprocal_scale(skb_get_hash(skb
), qcount
) + qoffset
;
3188 static void skb_warn_bad_offload(const struct sk_buff
*skb
)
3190 static const netdev_features_t null_features
;
3191 struct net_device
*dev
= skb
->dev
;
3192 const char *name
= "";
3194 if (!net_ratelimit())
3198 if (dev
->dev
.parent
)
3199 name
= dev_driver_string(dev
->dev
.parent
);
3201 name
= netdev_name(dev
);
3203 skb_dump(KERN_WARNING
, skb
, false);
3204 WARN(1, "%s: caps=(%pNF, %pNF)\n",
3205 name
, dev
? &dev
->features
: &null_features
,
3206 skb
->sk
? &skb
->sk
->sk_route_caps
: &null_features
);
3210 * Invalidate hardware checksum when packet is to be mangled, and
3211 * complete checksum manually on outgoing path.
3213 int skb_checksum_help(struct sk_buff
*skb
)
3216 int ret
= 0, offset
;
3218 if (skb
->ip_summed
== CHECKSUM_COMPLETE
)
3219 goto out_set_summed
;
3221 if (unlikely(skb_is_gso(skb
))) {
3222 skb_warn_bad_offload(skb
);
3226 /* Before computing a checksum, we should make sure no frag could
3227 * be modified by an external entity : checksum could be wrong.
3229 if (skb_has_shared_frag(skb
)) {
3230 ret
= __skb_linearize(skb
);
3235 offset
= skb_checksum_start_offset(skb
);
3236 BUG_ON(offset
>= skb_headlen(skb
));
3237 csum
= skb_checksum(skb
, offset
, skb
->len
- offset
, 0);
3239 offset
+= skb
->csum_offset
;
3240 BUG_ON(offset
+ sizeof(__sum16
) > skb_headlen(skb
));
3242 ret
= skb_ensure_writable(skb
, offset
+ sizeof(__sum16
));
3246 *(__sum16
*)(skb
->data
+ offset
) = csum_fold(csum
) ?: CSUM_MANGLED_0
;
3248 skb
->ip_summed
= CHECKSUM_NONE
;
3252 EXPORT_SYMBOL(skb_checksum_help
);
3254 int skb_crc32c_csum_help(struct sk_buff
*skb
)
3257 int ret
= 0, offset
, start
;
3259 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
3262 if (unlikely(skb_is_gso(skb
)))
3265 /* Before computing a checksum, we should make sure no frag could
3266 * be modified by an external entity : checksum could be wrong.
3268 if (unlikely(skb_has_shared_frag(skb
))) {
3269 ret
= __skb_linearize(skb
);
3273 start
= skb_checksum_start_offset(skb
);
3274 offset
= start
+ offsetof(struct sctphdr
, checksum
);
3275 if (WARN_ON_ONCE(offset
>= skb_headlen(skb
))) {
3280 ret
= skb_ensure_writable(skb
, offset
+ sizeof(__le32
));
3284 crc32c_csum
= cpu_to_le32(~__skb_checksum(skb
, start
,
3285 skb
->len
- start
, ~(__u32
)0,
3287 *(__le32
*)(skb
->data
+ offset
) = crc32c_csum
;
3288 skb
->ip_summed
= CHECKSUM_NONE
;
3289 skb
->csum_not_inet
= 0;
3294 __be16
skb_network_protocol(struct sk_buff
*skb
, int *depth
)
3296 __be16 type
= skb
->protocol
;
3298 /* Tunnel gso handlers can set protocol to ethernet. */
3299 if (type
== htons(ETH_P_TEB
)) {
3302 if (unlikely(!pskb_may_pull(skb
, sizeof(struct ethhdr
))))
3305 eth
= (struct ethhdr
*)skb
->data
;
3306 type
= eth
->h_proto
;
3309 return __vlan_get_protocol(skb
, type
, depth
);
3313 * skb_mac_gso_segment - mac layer segmentation handler.
3314 * @skb: buffer to segment
3315 * @features: features for the output path (see dev->features)
3317 struct sk_buff
*skb_mac_gso_segment(struct sk_buff
*skb
,
3318 netdev_features_t features
)
3320 struct sk_buff
*segs
= ERR_PTR(-EPROTONOSUPPORT
);
3321 struct packet_offload
*ptype
;
3322 int vlan_depth
= skb
->mac_len
;
3323 __be16 type
= skb_network_protocol(skb
, &vlan_depth
);
3325 if (unlikely(!type
))
3326 return ERR_PTR(-EINVAL
);
3328 __skb_pull(skb
, vlan_depth
);
3331 list_for_each_entry_rcu(ptype
, &offload_base
, list
) {
3332 if (ptype
->type
== type
&& ptype
->callbacks
.gso_segment
) {
3333 segs
= ptype
->callbacks
.gso_segment(skb
, features
);
3339 __skb_push(skb
, skb
->data
- skb_mac_header(skb
));
3343 EXPORT_SYMBOL(skb_mac_gso_segment
);
3346 /* openvswitch calls this on rx path, so we need a different check.
3348 static inline bool skb_needs_check(struct sk_buff
*skb
, bool tx_path
)
3351 return skb
->ip_summed
!= CHECKSUM_PARTIAL
&&
3352 skb
->ip_summed
!= CHECKSUM_UNNECESSARY
;
3354 return skb
->ip_summed
== CHECKSUM_NONE
;
3358 * __skb_gso_segment - Perform segmentation on skb.
3359 * @skb: buffer to segment
3360 * @features: features for the output path (see dev->features)
3361 * @tx_path: whether it is called in TX path
3363 * This function segments the given skb and returns a list of segments.
3365 * It may return NULL if the skb requires no segmentation. This is
3366 * only possible when GSO is used for verifying header integrity.
3368 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
3370 struct sk_buff
*__skb_gso_segment(struct sk_buff
*skb
,
3371 netdev_features_t features
, bool tx_path
)
3373 struct sk_buff
*segs
;
3375 if (unlikely(skb_needs_check(skb
, tx_path
))) {
3378 /* We're going to init ->check field in TCP or UDP header */
3379 err
= skb_cow_head(skb
, 0);
3381 return ERR_PTR(err
);
3384 /* Only report GSO partial support if it will enable us to
3385 * support segmentation on this frame without needing additional
3388 if (features
& NETIF_F_GSO_PARTIAL
) {
3389 netdev_features_t partial_features
= NETIF_F_GSO_ROBUST
;
3390 struct net_device
*dev
= skb
->dev
;
3392 partial_features
|= dev
->features
& dev
->gso_partial_features
;
3393 if (!skb_gso_ok(skb
, features
| partial_features
))
3394 features
&= ~NETIF_F_GSO_PARTIAL
;
3397 BUILD_BUG_ON(SKB_GSO_CB_OFFSET
+
3398 sizeof(*SKB_GSO_CB(skb
)) > sizeof(skb
->cb
));
3400 SKB_GSO_CB(skb
)->mac_offset
= skb_headroom(skb
);
3401 SKB_GSO_CB(skb
)->encap_level
= 0;
3403 skb_reset_mac_header(skb
);
3404 skb_reset_mac_len(skb
);
3406 segs
= skb_mac_gso_segment(skb
, features
);
3408 if (segs
!= skb
&& unlikely(skb_needs_check(skb
, tx_path
) && !IS_ERR(segs
)))
3409 skb_warn_bad_offload(skb
);
3413 EXPORT_SYMBOL(__skb_gso_segment
);
3415 /* Take action when hardware reception checksum errors are detected. */
3417 static void do_netdev_rx_csum_fault(struct net_device
*dev
, struct sk_buff
*skb
)
3419 pr_err("%s: hw csum failure\n", dev
? dev
->name
: "<unknown>");
3420 skb_dump(KERN_ERR
, skb
, true);
3424 void netdev_rx_csum_fault(struct net_device
*dev
, struct sk_buff
*skb
)
3426 DO_ONCE_LITE(do_netdev_rx_csum_fault
, dev
, skb
);
3428 EXPORT_SYMBOL(netdev_rx_csum_fault
);
3431 /* XXX: check that highmem exists at all on the given machine. */
3432 static int illegal_highdma(struct net_device
*dev
, struct sk_buff
*skb
)
3434 #ifdef CONFIG_HIGHMEM
3437 if (!(dev
->features
& NETIF_F_HIGHDMA
)) {
3438 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3439 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3441 if (PageHighMem(skb_frag_page(frag
)))
3449 /* If MPLS offload request, verify we are testing hardware MPLS features
3450 * instead of standard features for the netdev.
3452 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3453 static netdev_features_t
net_mpls_features(struct sk_buff
*skb
,
3454 netdev_features_t features
,
3457 if (eth_p_mpls(type
))
3458 features
&= skb
->dev
->mpls_features
;
3463 static netdev_features_t
net_mpls_features(struct sk_buff
*skb
,
3464 netdev_features_t features
,
3471 static netdev_features_t
harmonize_features(struct sk_buff
*skb
,
3472 netdev_features_t features
)
3476 type
= skb_network_protocol(skb
, NULL
);
3477 features
= net_mpls_features(skb
, features
, type
);
3479 if (skb
->ip_summed
!= CHECKSUM_NONE
&&
3480 !can_checksum_protocol(features
, type
)) {
3481 features
&= ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
3483 if (illegal_highdma(skb
->dev
, skb
))
3484 features
&= ~NETIF_F_SG
;
3489 netdev_features_t
passthru_features_check(struct sk_buff
*skb
,
3490 struct net_device
*dev
,
3491 netdev_features_t features
)
3495 EXPORT_SYMBOL(passthru_features_check
);
3497 static netdev_features_t
dflt_features_check(struct sk_buff
*skb
,
3498 struct net_device
*dev
,
3499 netdev_features_t features
)
3501 return vlan_features_check(skb
, features
);
3504 static netdev_features_t
gso_features_check(const struct sk_buff
*skb
,
3505 struct net_device
*dev
,
3506 netdev_features_t features
)
3508 u16 gso_segs
= skb_shinfo(skb
)->gso_segs
;
3510 if (gso_segs
> dev
->gso_max_segs
)
3511 return features
& ~NETIF_F_GSO_MASK
;
3513 if (!skb_shinfo(skb
)->gso_type
) {
3514 skb_warn_bad_offload(skb
);
3515 return features
& ~NETIF_F_GSO_MASK
;
3518 /* Support for GSO partial features requires software
3519 * intervention before we can actually process the packets
3520 * so we need to strip support for any partial features now
3521 * and we can pull them back in after we have partially
3522 * segmented the frame.
3524 if (!(skb_shinfo(skb
)->gso_type
& SKB_GSO_PARTIAL
))
3525 features
&= ~dev
->gso_partial_features
;
3527 /* Make sure to clear the IPv4 ID mangling feature if the
3528 * IPv4 header has the potential to be fragmented.
3530 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
) {
3531 struct iphdr
*iph
= skb
->encapsulation
?
3532 inner_ip_hdr(skb
) : ip_hdr(skb
);
3534 if (!(iph
->frag_off
& htons(IP_DF
)))
3535 features
&= ~NETIF_F_TSO_MANGLEID
;
3541 netdev_features_t
netif_skb_features(struct sk_buff
*skb
)
3543 struct net_device
*dev
= skb
->dev
;
3544 netdev_features_t features
= dev
->features
;
3546 if (skb_is_gso(skb
))
3547 features
= gso_features_check(skb
, dev
, features
);
3549 /* If encapsulation offload request, verify we are testing
3550 * hardware encapsulation features instead of standard
3551 * features for the netdev
3553 if (skb
->encapsulation
)
3554 features
&= dev
->hw_enc_features
;
3556 if (skb_vlan_tagged(skb
))
3557 features
= netdev_intersect_features(features
,
3558 dev
->vlan_features
|
3559 NETIF_F_HW_VLAN_CTAG_TX
|
3560 NETIF_F_HW_VLAN_STAG_TX
);
3562 if (dev
->netdev_ops
->ndo_features_check
)
3563 features
&= dev
->netdev_ops
->ndo_features_check(skb
, dev
,
3566 features
&= dflt_features_check(skb
, dev
, features
);
3568 return harmonize_features(skb
, features
);
3570 EXPORT_SYMBOL(netif_skb_features
);
3572 static int xmit_one(struct sk_buff
*skb
, struct net_device
*dev
,
3573 struct netdev_queue
*txq
, bool more
)
3578 if (dev_nit_active(dev
))
3579 dev_queue_xmit_nit(skb
, dev
);
3582 PRANDOM_ADD_NOISE(skb
, dev
, txq
, len
+ jiffies
);
3583 trace_net_dev_start_xmit(skb
, dev
);
3584 rc
= netdev_start_xmit(skb
, dev
, txq
, more
);
3585 trace_net_dev_xmit(skb
, rc
, dev
, len
);
3590 struct sk_buff
*dev_hard_start_xmit(struct sk_buff
*first
, struct net_device
*dev
,
3591 struct netdev_queue
*txq
, int *ret
)
3593 struct sk_buff
*skb
= first
;
3594 int rc
= NETDEV_TX_OK
;
3597 struct sk_buff
*next
= skb
->next
;
3599 skb_mark_not_on_list(skb
);
3600 rc
= xmit_one(skb
, dev
, txq
, next
!= NULL
);
3601 if (unlikely(!dev_xmit_complete(rc
))) {
3607 if (netif_tx_queue_stopped(txq
) && skb
) {
3608 rc
= NETDEV_TX_BUSY
;
3618 static struct sk_buff
*validate_xmit_vlan(struct sk_buff
*skb
,
3619 netdev_features_t features
)
3621 if (skb_vlan_tag_present(skb
) &&
3622 !vlan_hw_offload_capable(features
, skb
->vlan_proto
))
3623 skb
= __vlan_hwaccel_push_inside(skb
);
3627 int skb_csum_hwoffload_help(struct sk_buff
*skb
,
3628 const netdev_features_t features
)
3630 if (unlikely(skb_csum_is_sctp(skb
)))
3631 return !!(features
& NETIF_F_SCTP_CRC
) ? 0 :
3632 skb_crc32c_csum_help(skb
);
3634 if (features
& NETIF_F_HW_CSUM
)
3637 if (features
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
)) {
3638 switch (skb
->csum_offset
) {
3639 case offsetof(struct tcphdr
, check
):
3640 case offsetof(struct udphdr
, check
):
3645 return skb_checksum_help(skb
);
3647 EXPORT_SYMBOL(skb_csum_hwoffload_help
);
3649 static struct sk_buff
*validate_xmit_skb(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3651 netdev_features_t features
;
3653 features
= netif_skb_features(skb
);
3654 skb
= validate_xmit_vlan(skb
, features
);
3658 skb
= sk_validate_xmit_skb(skb
, dev
);
3662 if (netif_needs_gso(skb
, features
)) {
3663 struct sk_buff
*segs
;
3665 segs
= skb_gso_segment(skb
, features
);
3673 if (skb_needs_linearize(skb
, features
) &&
3674 __skb_linearize(skb
))
3677 /* If packet is not checksummed and device does not
3678 * support checksumming for this protocol, complete
3679 * checksumming here.
3681 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
3682 if (skb
->encapsulation
)
3683 skb_set_inner_transport_header(skb
,
3684 skb_checksum_start_offset(skb
));
3686 skb_set_transport_header(skb
,
3687 skb_checksum_start_offset(skb
));
3688 if (skb_csum_hwoffload_help(skb
, features
))
3693 skb
= validate_xmit_xfrm(skb
, features
, again
);
3700 atomic_long_inc(&dev
->tx_dropped
);
3704 struct sk_buff
*validate_xmit_skb_list(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3706 struct sk_buff
*next
, *head
= NULL
, *tail
;
3708 for (; skb
!= NULL
; skb
= next
) {
3710 skb_mark_not_on_list(skb
);
3712 /* in case skb wont be segmented, point to itself */
3715 skb
= validate_xmit_skb(skb
, dev
, again
);
3723 /* If skb was segmented, skb->prev points to
3724 * the last segment. If not, it still contains skb.
3730 EXPORT_SYMBOL_GPL(validate_xmit_skb_list
);
3732 static void qdisc_pkt_len_init(struct sk_buff
*skb
)
3734 const struct skb_shared_info
*shinfo
= skb_shinfo(skb
);
3736 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
3738 /* To get more precise estimation of bytes sent on wire,
3739 * we add to pkt_len the headers size of all segments
3741 if (shinfo
->gso_size
&& skb_transport_header_was_set(skb
)) {
3742 unsigned int hdr_len
;
3743 u16 gso_segs
= shinfo
->gso_segs
;
3745 /* mac layer + network layer */
3746 hdr_len
= skb_transport_header(skb
) - skb_mac_header(skb
);
3748 /* + transport layer */
3749 if (likely(shinfo
->gso_type
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
))) {
3750 const struct tcphdr
*th
;
3751 struct tcphdr _tcphdr
;
3753 th
= skb_header_pointer(skb
, skb_transport_offset(skb
),
3754 sizeof(_tcphdr
), &_tcphdr
);
3756 hdr_len
+= __tcp_hdrlen(th
);
3758 struct udphdr _udphdr
;
3760 if (skb_header_pointer(skb
, skb_transport_offset(skb
),
3761 sizeof(_udphdr
), &_udphdr
))
3762 hdr_len
+= sizeof(struct udphdr
);
3765 if (shinfo
->gso_type
& SKB_GSO_DODGY
)
3766 gso_segs
= DIV_ROUND_UP(skb
->len
- hdr_len
,
3769 qdisc_skb_cb(skb
)->pkt_len
+= (gso_segs
- 1) * hdr_len
;
3773 static int dev_qdisc_enqueue(struct sk_buff
*skb
, struct Qdisc
*q
,
3774 struct sk_buff
**to_free
,
3775 struct netdev_queue
*txq
)
3779 rc
= q
->enqueue(skb
, q
, to_free
) & NET_XMIT_MASK
;
3780 if (rc
== NET_XMIT_SUCCESS
)
3781 trace_qdisc_enqueue(q
, txq
, skb
);
3785 static inline int __dev_xmit_skb(struct sk_buff
*skb
, struct Qdisc
*q
,
3786 struct net_device
*dev
,
3787 struct netdev_queue
*txq
)
3789 spinlock_t
*root_lock
= qdisc_lock(q
);
3790 struct sk_buff
*to_free
= NULL
;
3794 qdisc_calculate_pkt_len(skb
, q
);
3796 if (q
->flags
& TCQ_F_NOLOCK
) {
3797 if (q
->flags
& TCQ_F_CAN_BYPASS
&& nolock_qdisc_is_empty(q
) &&
3798 qdisc_run_begin(q
)) {
3799 /* Retest nolock_qdisc_is_empty() within the protection
3800 * of q->seqlock to protect from racing with requeuing.
3802 if (unlikely(!nolock_qdisc_is_empty(q
))) {
3803 rc
= dev_qdisc_enqueue(skb
, q
, &to_free
, txq
);
3810 qdisc_bstats_cpu_update(q
, skb
);
3811 if (sch_direct_xmit(skb
, q
, dev
, txq
, NULL
, true) &&
3812 !nolock_qdisc_is_empty(q
))
3816 return NET_XMIT_SUCCESS
;
3819 rc
= dev_qdisc_enqueue(skb
, q
, &to_free
, txq
);
3823 if (unlikely(to_free
))
3824 kfree_skb_list(to_free
);
3829 * Heuristic to force contended enqueues to serialize on a
3830 * separate lock before trying to get qdisc main lock.
3831 * This permits qdisc->running owner to get the lock more
3832 * often and dequeue packets faster.
3834 contended
= qdisc_is_running(q
);
3835 if (unlikely(contended
))
3836 spin_lock(&q
->busylock
);
3838 spin_lock(root_lock
);
3839 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
, &q
->state
))) {
3840 __qdisc_drop(skb
, &to_free
);
3842 } else if ((q
->flags
& TCQ_F_CAN_BYPASS
) && !qdisc_qlen(q
) &&
3843 qdisc_run_begin(q
)) {
3845 * This is a work-conserving queue; there are no old skbs
3846 * waiting to be sent out; and the qdisc is not running -
3847 * xmit the skb directly.
3850 qdisc_bstats_update(q
, skb
);
3852 if (sch_direct_xmit(skb
, q
, dev
, txq
, root_lock
, true)) {
3853 if (unlikely(contended
)) {
3854 spin_unlock(&q
->busylock
);
3861 rc
= NET_XMIT_SUCCESS
;
3863 rc
= dev_qdisc_enqueue(skb
, q
, &to_free
, txq
);
3864 if (qdisc_run_begin(q
)) {
3865 if (unlikely(contended
)) {
3866 spin_unlock(&q
->busylock
);
3873 spin_unlock(root_lock
);
3874 if (unlikely(to_free
))
3875 kfree_skb_list(to_free
);
3876 if (unlikely(contended
))
3877 spin_unlock(&q
->busylock
);
3881 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3882 static void skb_update_prio(struct sk_buff
*skb
)
3884 const struct netprio_map
*map
;
3885 const struct sock
*sk
;
3886 unsigned int prioidx
;
3890 map
= rcu_dereference_bh(skb
->dev
->priomap
);
3893 sk
= skb_to_full_sk(skb
);
3897 prioidx
= sock_cgroup_prioidx(&sk
->sk_cgrp_data
);
3899 if (prioidx
< map
->priomap_len
)
3900 skb
->priority
= map
->priomap
[prioidx
];
3903 #define skb_update_prio(skb)
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	if (skb->ip_summed == CHECKSUM_NONE)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
3926 #ifdef CONFIG_NET_EGRESS
3927 static struct sk_buff
*
3928 sch_handle_egress(struct sk_buff
*skb
, int *ret
, struct net_device
*dev
)
3930 struct mini_Qdisc
*miniq
= rcu_dereference_bh(dev
->miniq_egress
);
3931 struct tcf_result cl_res
;
3936 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3937 tc_skb_cb(skb
)->mru
= 0;
3938 tc_skb_cb(skb
)->post_ct
= false;
3939 mini_qdisc_bstats_cpu_update(miniq
, skb
);
3941 switch (tcf_classify(skb
, miniq
->block
, miniq
->filter_list
, &cl_res
, false)) {
3943 case TC_ACT_RECLASSIFY
:
3944 skb
->tc_index
= TC_H_MIN(cl_res
.classid
);
3947 mini_qdisc_qstats_cpu_drop(miniq
);
3948 *ret
= NET_XMIT_DROP
;
3954 *ret
= NET_XMIT_SUCCESS
;
3957 case TC_ACT_REDIRECT
:
3958 /* No need to push/pop skb's mac_header here on egress! */
3959 skb_do_redirect(skb
);
3960 *ret
= NET_XMIT_SUCCESS
;
3968 #endif /* CONFIG_NET_EGRESS */
3971 static int __get_xps_queue_idx(struct net_device
*dev
, struct sk_buff
*skb
,
3972 struct xps_dev_maps
*dev_maps
, unsigned int tci
)
3974 int tc
= netdev_get_prio_tc_map(dev
, skb
->priority
);
3975 struct xps_map
*map
;
3976 int queue_index
= -1;
3978 if (tc
>= dev_maps
->num_tc
|| tci
>= dev_maps
->nr_ids
)
3981 tci
*= dev_maps
->num_tc
;
3984 map
= rcu_dereference(dev_maps
->attr_map
[tci
]);
3987 queue_index
= map
->queues
[0];
3989 queue_index
= map
->queues
[reciprocal_scale(
3990 skb_get_hash(skb
), map
->len
)];
3991 if (unlikely(queue_index
>= dev
->real_num_tx_queues
))
3998 static int get_xps_queue(struct net_device
*dev
, struct net_device
*sb_dev
,
3999 struct sk_buff
*skb
)
4002 struct xps_dev_maps
*dev_maps
;
4003 struct sock
*sk
= skb
->sk
;
4004 int queue_index
= -1;
4006 if (!static_key_false(&xps_needed
))
4010 if (!static_key_false(&xps_rxqs_needed
))
4013 dev_maps
= rcu_dereference(sb_dev
->xps_maps
[XPS_RXQS
]);
4015 int tci
= sk_rx_queue_get(sk
);
4018 queue_index
= __get_xps_queue_idx(dev
, skb
, dev_maps
,
4023 if (queue_index
< 0) {
4024 dev_maps
= rcu_dereference(sb_dev
->xps_maps
[XPS_CPUS
]);
4026 unsigned int tci
= skb
->sender_cpu
- 1;
4028 queue_index
= __get_xps_queue_idx(dev
, skb
, dev_maps
,
4040 u16
dev_pick_tx_zero(struct net_device
*dev
, struct sk_buff
*skb
,
4041 struct net_device
*sb_dev
)
4045 EXPORT_SYMBOL(dev_pick_tx_zero
);
4047 u16
dev_pick_tx_cpu_id(struct net_device
*dev
, struct sk_buff
*skb
,
4048 struct net_device
*sb_dev
)
4050 return (u16
)raw_smp_processor_id() % dev
->real_num_tx_queues
;
4052 EXPORT_SYMBOL(dev_pick_tx_cpu_id
);
4054 u16
netdev_pick_tx(struct net_device
*dev
, struct sk_buff
*skb
,
4055 struct net_device
*sb_dev
)
4057 struct sock
*sk
= skb
->sk
;
4058 int queue_index
= sk_tx_queue_get(sk
);
4060 sb_dev
= sb_dev
? : dev
;
4062 if (queue_index
< 0 || skb
->ooo_okay
||
4063 queue_index
>= dev
->real_num_tx_queues
) {
4064 int new_index
= get_xps_queue(dev
, sb_dev
, skb
);
4067 new_index
= skb_tx_hash(dev
, sb_dev
, skb
);
4069 if (queue_index
!= new_index
&& sk
&&
4071 rcu_access_pointer(sk
->sk_dst_cache
))
4072 sk_tx_queue_set(sk
, new_index
);
4074 queue_index
= new_index
;
4079 EXPORT_SYMBOL(netdev_pick_tx
);
4081 struct netdev_queue
*netdev_core_pick_tx(struct net_device
*dev
,
4082 struct sk_buff
*skb
,
4083 struct net_device
*sb_dev
)
4085 int queue_index
= 0;
4088 u32 sender_cpu
= skb
->sender_cpu
- 1;
4090 if (sender_cpu
>= (u32
)NR_CPUS
)
4091 skb
->sender_cpu
= raw_smp_processor_id() + 1;
4094 if (dev
->real_num_tx_queues
!= 1) {
4095 const struct net_device_ops
*ops
= dev
->netdev_ops
;
4097 if (ops
->ndo_select_queue
)
4098 queue_index
= ops
->ndo_select_queue(dev
, skb
, sb_dev
);
4100 queue_index
= netdev_pick_tx(dev
, skb
, sb_dev
);
4102 queue_index
= netdev_cap_txqueue(dev
, queue_index
);
4105 skb_set_queue_mapping(skb
, queue_index
);
4106 return netdev_get_tx_queue(dev
, queue_index
);
4110 * __dev_queue_xmit - transmit a buffer
4111 * @skb: buffer to transmit
4112 * @sb_dev: suboordinate device used for L2 forwarding offload
4114 * Queue a buffer for transmission to a network device. The caller must
4115 * have set the device and priority and built the buffer before calling
4116 * this function. The function can be called from an interrupt.
4118 * A negative errno code is returned on a failure. A success does not
4119 * guarantee the frame will be transmitted as it may be dropped due
4120 * to congestion or traffic shaping.
4122 * -----------------------------------------------------------------------------------
4123 * I notice this method can also return errors from the queue disciplines,
4124 * including NET_XMIT_DROP, which is a positive value. So, errors can also
4127 * Regardless of the return value, the skb is consumed, so it is currently
4128 * difficult to retry a send to this method. (You can bump the ref count
4129 * before sending to hold a reference for retry if you are careful.)
4131 * When calling this method, interrupts MUST be enabled. This is because
4132 * the BH enable code must have IRQs enabled so that it will not deadlock.
4135 static int __dev_queue_xmit(struct sk_buff
*skb
, struct net_device
*sb_dev
)
4137 struct net_device
*dev
= skb
->dev
;
4138 struct netdev_queue
*txq
;
4143 skb_reset_mac_header(skb
);
4145 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_SCHED_TSTAMP
))
4146 __skb_tstamp_tx(skb
, NULL
, NULL
, skb
->sk
, SCM_TSTAMP_SCHED
);
4148 /* Disable soft irqs for various locks below. Also
4149 * stops preemption for RCU.
4153 skb_update_prio(skb
);
4155 qdisc_pkt_len_init(skb
);
4156 #ifdef CONFIG_NET_CLS_ACT
4157 skb
->tc_at_ingress
= 0;
4158 # ifdef CONFIG_NET_EGRESS
4159 if (static_branch_unlikely(&egress_needed_key
)) {
4160 skb
= sch_handle_egress(skb
, &rc
, dev
);
4166 /* If device/qdisc don't need skb->dst, release it right now while
4167 * its hot in this cpu cache.
4169 if (dev
->priv_flags
& IFF_XMIT_DST_RELEASE
)
4174 txq
= netdev_core_pick_tx(dev
, skb
, sb_dev
);
4175 q
= rcu_dereference_bh(txq
->qdisc
);
4177 trace_net_dev_queue(skb
);
4179 rc
= __dev_xmit_skb(skb
, q
, dev
, txq
);
4183 /* The device has no queue. Common case for software devices:
4184 * loopback, all the sorts of tunnels...
4186 * Really, it is unlikely that netif_tx_lock protection is necessary
4187 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
4189 * However, it is possible, that they rely on protection
4192 * Check this and shot the lock. It is not prone from deadlocks.
4193 *Either shot noqueue qdisc, it is even simpler 8)
4195 if (dev
->flags
& IFF_UP
) {
4196 int cpu
= smp_processor_id(); /* ok because BHs are off */
4198 /* Other cpus might concurrently change txq->xmit_lock_owner
4199 * to -1 or to their cpu id, but not to our id.
4201 if (READ_ONCE(txq
->xmit_lock_owner
) != cpu
) {
4202 if (dev_xmit_recursion())
4203 goto recursion_alert
;
4205 skb
= validate_xmit_skb(skb
, dev
, &again
);
4209 PRANDOM_ADD_NOISE(skb
, dev
, txq
, jiffies
);
4210 HARD_TX_LOCK(dev
, txq
, cpu
);
4212 if (!netif_xmit_stopped(txq
)) {
4213 dev_xmit_recursion_inc();
4214 skb
= dev_hard_start_xmit(skb
, dev
, txq
, &rc
);
4215 dev_xmit_recursion_dec();
4216 if (dev_xmit_complete(rc
)) {
4217 HARD_TX_UNLOCK(dev
, txq
);
4221 HARD_TX_UNLOCK(dev
, txq
);
4222 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4225 /* Recursion is detected! It is possible,
4229 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4235 rcu_read_unlock_bh();
4237 atomic_long_inc(&dev
->tx_dropped
);
4238 kfree_skb_list(skb
);
4241 rcu_read_unlock_bh();
int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
{
	return __dev_queue_xmit(skb, sb_dev);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
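/* Illustrative sketch (not part of this file): a kernel component that builds
 * its own frame hands it to the stack with dev_queue_xmit(); the skb is
 * always consumed, so no extra free is needed after the call.  The payload,
 * payload_len and dest variables below are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(ETH_HLEN + payload_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, ETH_HLEN);
 *	skb_put_data(skb, payload, payload_len);
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_802_2);
 *	if (dev_hard_header(skb, dev, ETH_P_802_2, dest, NULL, skb->len) < 0) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 *	dev_queue_xmit(skb);
 */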
4257 int __dev_direct_xmit(struct sk_buff
*skb
, u16 queue_id
)
4259 struct net_device
*dev
= skb
->dev
;
4260 struct sk_buff
*orig_skb
= skb
;
4261 struct netdev_queue
*txq
;
4262 int ret
= NETDEV_TX_BUSY
;
4265 if (unlikely(!netif_running(dev
) ||
4266 !netif_carrier_ok(dev
)))
4269 skb
= validate_xmit_skb_list(skb
, dev
, &again
);
4270 if (skb
!= orig_skb
)
4273 skb_set_queue_mapping(skb
, queue_id
);
4274 txq
= skb_get_tx_queue(dev
, skb
);
4275 PRANDOM_ADD_NOISE(skb
, dev
, txq
, jiffies
);
4279 dev_xmit_recursion_inc();
4280 HARD_TX_LOCK(dev
, txq
, smp_processor_id());
4281 if (!netif_xmit_frozen_or_drv_stopped(txq
))
4282 ret
= netdev_start_xmit(skb
, dev
, txq
, false);
4283 HARD_TX_UNLOCK(dev
, txq
);
4284 dev_xmit_recursion_dec();
4289 atomic_long_inc(&dev
->tx_dropped
);
4290 kfree_skb_list(skb
);
4291 return NET_XMIT_DROP
;
4293 EXPORT_SYMBOL(__dev_direct_xmit
);
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;
4314 /* Called with irq disabled */
4315 static inline void ____napi_schedule(struct softnet_data
*sd
,
4316 struct napi_struct
*napi
)
4318 struct task_struct
*thread
;
4320 if (test_bit(NAPI_STATE_THREADED
, &napi
->state
)) {
4321 /* Paired with smp_mb__before_atomic() in
4322 * napi_enable()/dev_set_threaded().
4323 * Use READ_ONCE() to guarantee a complete
4324 * read on napi->thread. Only call
4325 * wake_up_process() when it's not NULL.
4327 thread
= READ_ONCE(napi
->thread
);
4329 /* Avoid doing set_bit() if the thread is in
4330 * INTERRUPTIBLE state, cause napi_thread_wait()
4331 * makes sure to proceed with napi polling
4332 * if the thread is explicitly woken from here.
4334 if (READ_ONCE(thread
->__state
) != TASK_INTERRUPTIBLE
)
4335 set_bit(NAPI_STATE_SCHED_THREADED
, &napi
->state
);
4336 wake_up_process(thread
);
4341 list_add_tail(&napi
->poll_list
, &sd
->poll_list
);
4342 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
4347 /* One global table that all flow-based protocols share. */
4348 struct rps_sock_flow_table __rcu
*rps_sock_flow_table __read_mostly
;
4349 EXPORT_SYMBOL(rps_sock_flow_table
);
4350 u32 rps_cpu_mask __read_mostly
;
4351 EXPORT_SYMBOL(rps_cpu_mask
);
4353 struct static_key_false rps_needed __read_mostly
;
4354 EXPORT_SYMBOL(rps_needed
);
4355 struct static_key_false rfs_needed __read_mostly
;
4356 EXPORT_SYMBOL(rfs_needed
);
4358 static struct rps_dev_flow
*
4359 set_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
4360 struct rps_dev_flow
*rflow
, u16 next_cpu
)
4362 if (next_cpu
< nr_cpu_ids
) {
4363 #ifdef CONFIG_RFS_ACCEL
4364 struct netdev_rx_queue
*rxqueue
;
4365 struct rps_dev_flow_table
*flow_table
;
4366 struct rps_dev_flow
*old_rflow
;
4371 /* Should we steer this flow to a different hardware queue? */
4372 if (!skb_rx_queue_recorded(skb
) || !dev
->rx_cpu_rmap
||
4373 !(dev
->features
& NETIF_F_NTUPLE
))
4375 rxq_index
= cpu_rmap_lookup_index(dev
->rx_cpu_rmap
, next_cpu
);
4376 if (rxq_index
== skb_get_rx_queue(skb
))
4379 rxqueue
= dev
->_rx
+ rxq_index
;
4380 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
4383 flow_id
= skb_get_hash(skb
) & flow_table
->mask
;
4384 rc
= dev
->netdev_ops
->ndo_rx_flow_steer(dev
, skb
,
4385 rxq_index
, flow_id
);
4389 rflow
= &flow_table
->flows
[flow_id
];
4391 if (old_rflow
->filter
== rflow
->filter
)
4392 old_rflow
->filter
= RPS_NO_FILTER
;
4396 per_cpu(softnet_data
, next_cpu
).input_queue_head
;
4399 rflow
->cpu
= next_cpu
;
4404 * get_rps_cpu is called from netif_receive_skb and returns the target
4405 * CPU from the RPS map of the receiving queue for a given skb.
4406 * rcu_read_lock must be held on entry.
4408 static int get_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
4409 struct rps_dev_flow
**rflowp
)
4411 const struct rps_sock_flow_table
*sock_flow_table
;
4412 struct netdev_rx_queue
*rxqueue
= dev
->_rx
;
4413 struct rps_dev_flow_table
*flow_table
;
4414 struct rps_map
*map
;
4419 if (skb_rx_queue_recorded(skb
)) {
4420 u16 index
= skb_get_rx_queue(skb
);
4422 if (unlikely(index
>= dev
->real_num_rx_queues
)) {
4423 WARN_ONCE(dev
->real_num_rx_queues
> 1,
4424 "%s received packet on queue %u, but number "
4425 "of RX queues is %u\n",
4426 dev
->name
, index
, dev
->real_num_rx_queues
);
4432 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4434 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
4435 map
= rcu_dereference(rxqueue
->rps_map
);
4436 if (!flow_table
&& !map
)
4439 skb_reset_network_header(skb
);
4440 hash
= skb_get_hash(skb
);
4444 sock_flow_table
= rcu_dereference(rps_sock_flow_table
);
4445 if (flow_table
&& sock_flow_table
) {
4446 struct rps_dev_flow
*rflow
;
4450 /* First check into global flow table if there is a match */
4451 ident
= sock_flow_table
->ents
[hash
& sock_flow_table
->mask
];
4452 if ((ident
^ hash
) & ~rps_cpu_mask
)
4455 next_cpu
= ident
& rps_cpu_mask
;
4457 /* OK, now we know there is a match,
4458 * we can look at the local (per receive queue) flow table
4460 rflow
= &flow_table
->flows
[hash
& flow_table
->mask
];
4464 * If the desired CPU (where last recvmsg was done) is
4465 * different from current CPU (one in the rx-queue flow
4466 * table entry), switch if one of the following holds:
4467 * - Current CPU is unset (>= nr_cpu_ids).
4468 * - Current CPU is offline.
4469 * - The current CPU's queue tail has advanced beyond the
4470 * last packet that was enqueued using this table entry.
4471 * This guarantees that all previous packets for the flow
4472 * have been dequeued, thus preserving in order delivery.
4474 if (unlikely(tcpu
!= next_cpu
) &&
4475 (tcpu
>= nr_cpu_ids
|| !cpu_online(tcpu
) ||
4476 ((int)(per_cpu(softnet_data
, tcpu
).input_queue_head
-
4477 rflow
->last_qtail
)) >= 0)) {
4479 rflow
= set_rps_cpu(dev
, skb
, rflow
, next_cpu
);
4482 if (tcpu
< nr_cpu_ids
&& cpu_online(tcpu
)) {
4492 tcpu
= map
->cpus
[reciprocal_scale(hash
, map
->len
)];
4493 if (cpu_online(tcpu
)) {
4503 #ifdef CONFIG_RFS_ACCEL
/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
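/* Illustrative sketch (not part of this file): a driver implementing
 * ndo_rx_flow_steer() would periodically walk its installed filters and tear
 * down the ones the stack no longer cares about.  The filter table and the
 * toy_remove_filter() helper below are hypothetical.
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct toy_filter *f = &priv->filters[i];
 *
 *		if (!f->installed)
 *			continue;
 *		if (rps_may_expire_flow(dev, f->rxq_index, f->flow_id, i))
 *			toy_remove_filter(priv, f);
 *	}
 */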
4541 #endif /* CONFIG_RFS_ACCEL */
4543 /* Called from hardirq (IPI) context */
4544 static void rps_trigger_softirq(void *data
)
4546 struct softnet_data
*sd
= data
;
4548 ____napi_schedule(sd
, &sd
->backlog
);
4552 #endif /* CONFIG_RPS */
4555 * Check if this softnet_data structure is another cpu one
4556 * If yes, queue it to our IPI list and return 1
4559 static int rps_ipi_queued(struct softnet_data
*sd
)
4562 struct softnet_data
*mysd
= this_cpu_ptr(&softnet_data
);
4565 sd
->rps_ipi_next
= mysd
->rps_ipi_list
;
4566 mysd
->rps_ipi_list
= sd
;
4568 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
4571 #endif /* CONFIG_RPS */
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_rx_queue *rxqueue;

	rxqueue = dev->_rx;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);

			return rxqueue; /* Return first rxqueue */
4688 u32
bpf_prog_run_generic_xdp(struct sk_buff
*skb
, struct xdp_buff
*xdp
,
4689 struct bpf_prog
*xdp_prog
)
4691 void *orig_data
, *orig_data_end
, *hard_start
;
4692 struct netdev_rx_queue
*rxqueue
;
4693 bool orig_bcast
, orig_host
;
4694 u32 mac_len
, frame_sz
;
4695 __be16 orig_eth_type
;
4700 /* The XDP program wants to see the packet starting at the MAC
4703 mac_len
= skb
->data
- skb_mac_header(skb
);
4704 hard_start
= skb
->data
- skb_headroom(skb
);
4706 /* SKB "head" area always have tailroom for skb_shared_info */
4707 frame_sz
= (void *)skb_end_pointer(skb
) - hard_start
;
4708 frame_sz
+= SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
4710 rxqueue
= netif_get_rxqueue(skb
);
4711 xdp_init_buff(xdp
, frame_sz
, &rxqueue
->xdp_rxq
);
4712 xdp_prepare_buff(xdp
, hard_start
, skb_headroom(skb
) - mac_len
,
4713 skb_headlen(skb
) + mac_len
, true);
4715 orig_data_end
= xdp
->data_end
;
4716 orig_data
= xdp
->data
;
4717 eth
= (struct ethhdr
*)xdp
->data
;
4718 orig_host
= ether_addr_equal_64bits(eth
->h_dest
, skb
->dev
->dev_addr
);
4719 orig_bcast
= is_multicast_ether_addr_64bits(eth
->h_dest
);
4720 orig_eth_type
= eth
->h_proto
;
4722 act
= bpf_prog_run_xdp(xdp_prog
, xdp
);
4724 /* check if bpf_xdp_adjust_head was used */
4725 off
= xdp
->data
- orig_data
;
4728 __skb_pull(skb
, off
);
4730 __skb_push(skb
, -off
);
4732 skb
->mac_header
+= off
;
4733 skb_reset_network_header(skb
);
4736 /* check if bpf_xdp_adjust_tail was used */
4737 off
= xdp
->data_end
- orig_data_end
;
4739 skb_set_tail_pointer(skb
, xdp
->data_end
- xdp
->data
);
4740 skb
->len
+= off
; /* positive on grow, negative on shrink */
4743 /* check if XDP changed eth hdr such SKB needs update */
4744 eth
= (struct ethhdr
*)xdp
->data
;
4745 if ((orig_eth_type
!= eth
->h_proto
) ||
4746 (orig_host
!= ether_addr_equal_64bits(eth
->h_dest
,
4747 skb
->dev
->dev_addr
)) ||
4748 (orig_bcast
!= is_multicast_ether_addr_64bits(eth
->h_dest
))) {
4749 __skb_push(skb
, ETH_HLEN
);
4750 skb
->pkt_type
= PACKET_HOST
;
4751 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
4754 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
4755 * before calling us again on redirect path. We do not call do_redirect
4756 * as we leave that up to the caller.
4758 * Caller is responsible for managing lifetime of skb (i.e. calling
4759 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4764 __skb_push(skb
, mac_len
);
4767 metalen
= xdp
->data
- xdp
->data_meta
;
4769 skb_metadata_set(skb
, metalen
);
4776 static u32
netif_receive_generic_xdp(struct sk_buff
*skb
,
4777 struct xdp_buff
*xdp
,
4778 struct bpf_prog
*xdp_prog
)
4782 /* Reinjected packets coming from act_mirred or similar should
4783 * not get XDP generic processing.
4785 if (skb_is_redirected(skb
))
4788 /* XDP packets must be linear and must have sufficient headroom
4789 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4790 * native XDP provides, thus we need to do it here as well.
4792 if (skb_cloned(skb
) || skb_is_nonlinear(skb
) ||
4793 skb_headroom(skb
) < XDP_PACKET_HEADROOM
) {
4794 int hroom
= XDP_PACKET_HEADROOM
- skb_headroom(skb
);
4795 int troom
= skb
->tail
+ skb
->data_len
- skb
->end
;
4797 /* In case we have to go down the path and also linearize,
4798 * then lets do the pskb_expand_head() work just once here.
4800 if (pskb_expand_head(skb
,
4801 hroom
> 0 ? ALIGN(hroom
, NET_SKB_PAD
) : 0,
4802 troom
> 0 ? troom
+ 128 : 0, GFP_ATOMIC
))
4804 if (skb_linearize(skb
))
4808 act
= bpf_prog_run_generic_xdp(skb
, xdp
, xdp_prog
);
4815 bpf_warn_invalid_xdp_action(act
);
4818 trace_xdp_exception(skb
->dev
, xdp_prog
, act
);
4829 /* When doing generic XDP we have to bypass the qdisc layer and the
4830 * network taps in order to match in-driver-XDP behavior.
4832 void generic_xdp_tx(struct sk_buff
*skb
, struct bpf_prog
*xdp_prog
)
4834 struct net_device
*dev
= skb
->dev
;
4835 struct netdev_queue
*txq
;
4836 bool free_skb
= true;
4839 txq
= netdev_core_pick_tx(dev
, skb
, NULL
);
4840 cpu
= smp_processor_id();
4841 HARD_TX_LOCK(dev
, txq
, cpu
);
4842 if (!netif_xmit_stopped(txq
)) {
4843 rc
= netdev_start_xmit(skb
, dev
, txq
, 0);
4844 if (dev_xmit_complete(rc
))
4847 HARD_TX_UNLOCK(dev
, txq
);
4849 trace_xdp_exception(dev
, xdp_prog
, XDP_TX
);
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);

int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;
		int err;

		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect(skb->dev, skb,
							      &xdp, xdp_prog);
				if (err)
					goto out_redir;
				break;
			case XDP_TX:
				generic_xdp_tx(skb, xdp_prog);
				break;
			}
			return XDP_DROP;
		}
	}
	return XDP_PASS;
out_redir:
	kfree_skb(skb);
	return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);

#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	trace_netif_rx_entry(skb);

	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_rx);
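
/* Editor's illustrative sketch (not part of this file): how a typical
 * non-NAPI driver might hand a received frame to the stack with netif_rx().
 * The example_* names and the fixed-size copy are assumptions made for the
 * example; netdev_alloc_skb_ip_align(), skb_put_data(), eth_type_trans()
 * and netif_rx() are the real kernel APIs being demonstrated.
 */
#if 0
static void example_rx_frame(struct net_device *dev, const void *data,
			     unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return;				/* allocation failure: frame is dropped */
	skb_put_data(skb, data, len);		/* copy the frame into the skb */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue to the per-CPU backlog; always "succeeds" */
}
#endif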
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();
	trace_netif_rx_ni_exit(err);

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

int netif_rx_any_context(struct sk_buff *skb)
{
	/*
	 * If invoked from contexts which do not invoke bottom half
	 * processing either at return from interrupt or when softirqs are
	 * reenabled, use netif_rx_ni() which invokes bottom half processing
	 * directly.
	 */
	if (in_interrupt())
		return netif_rx(skb);
	else
		return netif_rx_ni(skb);
}
EXPORT_SYMBOL(netif_rx_any_context);
4981 static __latent_entropy
void net_tx_action(struct softirq_action
*h
)
4983 struct softnet_data
*sd
= this_cpu_ptr(&softnet_data
);
4985 if (sd
->completion_queue
) {
4986 struct sk_buff
*clist
;
4988 local_irq_disable();
4989 clist
= sd
->completion_queue
;
4990 sd
->completion_queue
= NULL
;
4994 struct sk_buff
*skb
= clist
;
4996 clist
= clist
->next
;
4998 WARN_ON(refcount_read(&skb
->users
));
4999 if (likely(get_kfree_skb_cb(skb
)->reason
== SKB_REASON_CONSUMED
))
5000 trace_consume_skb(skb
);
5002 trace_kfree_skb(skb
, net_tx_action
);
5004 if (skb
->fclone
!= SKB_FCLONE_UNAVAILABLE
)
5007 __kfree_skb_defer(skb
);
5011 if (sd
->output_queue
) {
5014 local_irq_disable();
5015 head
= sd
->output_queue
;
5016 sd
->output_queue
= NULL
;
5017 sd
->output_queue_tailp
= &sd
->output_queue
;
5023 struct Qdisc
*q
= head
;
5024 spinlock_t
*root_lock
= NULL
;
5026 head
= head
->next_sched
;
5028 /* We need to make sure head->next_sched is read
5029 * before clearing __QDISC_STATE_SCHED
5031 smp_mb__before_atomic();
5033 if (!(q
->flags
& TCQ_F_NOLOCK
)) {
5034 root_lock
= qdisc_lock(q
);
5035 spin_lock(root_lock
);
5036 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
,
5038 /* There is a synchronize_net() between
5039 * STATE_DEACTIVATED flag being set and
5040 * qdisc_reset()/some_qdisc_is_busy() in
5041 * dev_deactivate(), so we can safely bail out
5042 * early here to avoid data race between
5043 * qdisc_deactivate() and some_qdisc_is_busy()
5044 * for lockless qdisc.
5046 clear_bit(__QDISC_STATE_SCHED
, &q
->state
);
5050 clear_bit(__QDISC_STATE_SCHED
, &q
->state
);
5053 spin_unlock(root_lock
);
5059 xfrm_dev_backlog(sd
);
5062 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5063 /* This hook is defined here for ATM LANE */
5064 int (*br_fdb_test_addr_hook
)(struct net_device
*dev
,
5065 unsigned char *addr
) __read_mostly
;
5066 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook
);
5069 static inline struct sk_buff
*
5070 sch_handle_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
, int *ret
,
5071 struct net_device
*orig_dev
, bool *another
)
5073 #ifdef CONFIG_NET_CLS_ACT
5074 struct mini_Qdisc
*miniq
= rcu_dereference_bh(skb
->dev
->miniq_ingress
);
5075 struct tcf_result cl_res
;
5077 /* If there's at least one ingress present somewhere (so
5078 * we get here via enabled static key), remaining devices
5079 * that are not configured with an ingress qdisc will bail
5086 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
5090 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
5091 tc_skb_cb(skb
)->mru
= 0;
5092 tc_skb_cb(skb
)->post_ct
= false;
5093 skb
->tc_at_ingress
= 1;
5094 mini_qdisc_bstats_cpu_update(miniq
, skb
);
5096 switch (tcf_classify(skb
, miniq
->block
, miniq
->filter_list
, &cl_res
, false)) {
5098 case TC_ACT_RECLASSIFY
:
5099 skb
->tc_index
= TC_H_MIN(cl_res
.classid
);
5102 mini_qdisc_qstats_cpu_drop(miniq
);
5110 case TC_ACT_REDIRECT
:
5111 /* skb_mac_header check was done by cls/act_bpf, so
5112 * we can safely push the L2 header back before
5113 * redirecting to another netdev
5115 __skb_push(skb
, skb
->mac_len
);
5116 if (skb_do_redirect(skb
) == -EAGAIN
) {
5117 __skb_pull(skb
, skb
->mac_len
);
5122 case TC_ACT_CONSUMED
:
5127 #endif /* CONFIG_NET_CLS_ACT */
/**
 * netdev_is_rx_handler_busy - check if receive handler is registered
 * @dev: device to check
 *
 * Check if a receive handler is already registered for a given device.
 * Return true if there is one.
 *
 * The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	if (dev->priv_flags & IFF_NO_RX_HANDLER)
		return -EINVAL;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
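
/* Editor's illustrative sketch (not part of this file): registering an
 * rx_handler in the style of bridge/bonding/team.  The example_* names are
 * assumptions; netdev_rx_handler_register(), rx_handler_result_t and the
 * RX_HANDLER_* values are the real API.  Registration must run under
 * rtnl_lock(), as documented above.
 */
#if 0
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *priv = rcu_dereference(skb->dev->rx_handler_data);

	/* ... consume, steal or rewrite the packet based on priv ... */
	(void)priv;
	return RX_HANDLER_PASS;		/* let normal ptype delivery continue */
}

static int example_attach_port(struct net_device *port_dev, void *priv)
{
	return netdev_rx_handler_register(port_dev, example_handle_frame, priv);
}
#endif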
/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
	return 0;
}
5238 static int __netif_receive_skb_core(struct sk_buff
**pskb
, bool pfmemalloc
,
5239 struct packet_type
**ppt_prev
)
5241 struct packet_type
*ptype
, *pt_prev
;
5242 rx_handler_func_t
*rx_handler
;
5243 struct sk_buff
*skb
= *pskb
;
5244 struct net_device
*orig_dev
;
5245 bool deliver_exact
= false;
5246 int ret
= NET_RX_DROP
;
5249 net_timestamp_check(!netdev_tstamp_prequeue
, skb
);
5251 trace_netif_receive_skb(skb
);
5253 orig_dev
= skb
->dev
;
5255 skb_reset_network_header(skb
);
5256 if (!skb_transport_header_was_set(skb
))
5257 skb_reset_transport_header(skb
);
5258 skb_reset_mac_len(skb
);
5263 skb
->skb_iif
= skb
->dev
->ifindex
;
5265 __this_cpu_inc(softnet_data
.processed
);
5267 if (static_branch_unlikely(&generic_xdp_needed_key
)) {
5271 ret2
= do_xdp_generic(rcu_dereference(skb
->dev
->xdp_prog
), skb
);
5274 if (ret2
!= XDP_PASS
) {
5280 if (eth_type_vlan(skb
->protocol
)) {
5281 skb
= skb_vlan_untag(skb
);
5286 if (skb_skip_tc_classify(skb
))
5292 list_for_each_entry_rcu(ptype
, &ptype_all
, list
) {
5294 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
5298 list_for_each_entry_rcu(ptype
, &skb
->dev
->ptype_all
, list
) {
5300 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
5305 #ifdef CONFIG_NET_INGRESS
5306 if (static_branch_unlikely(&ingress_needed_key
)) {
5307 bool another
= false;
5309 skb
= sch_handle_ingress(skb
, &pt_prev
, &ret
, orig_dev
,
5316 if (nf_ingress(skb
, &pt_prev
, &ret
, orig_dev
) < 0)
5320 skb_reset_redirect(skb
);
5322 if (pfmemalloc
&& !skb_pfmemalloc_protocol(skb
))
5325 if (skb_vlan_tag_present(skb
)) {
5327 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
5330 if (vlan_do_receive(&skb
))
5332 else if (unlikely(!skb
))
5336 rx_handler
= rcu_dereference(skb
->dev
->rx_handler
);
5339 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
5342 switch (rx_handler(&skb
)) {
5343 case RX_HANDLER_CONSUMED
:
5344 ret
= NET_RX_SUCCESS
;
5346 case RX_HANDLER_ANOTHER
:
5348 case RX_HANDLER_EXACT
:
5349 deliver_exact
= true;
5351 case RX_HANDLER_PASS
:
5358 if (unlikely(skb_vlan_tag_present(skb
)) && !netdev_uses_dsa(skb
->dev
)) {
5360 if (skb_vlan_tag_get_id(skb
)) {
5361 /* Vlan id is non 0 and vlan_do_receive() above couldn't
5364 skb
->pkt_type
= PACKET_OTHERHOST
;
5365 } else if (eth_type_vlan(skb
->protocol
)) {
5366 /* Outer header is 802.1P with vlan 0, inner header is
5367 * 802.1Q or 802.1AD and vlan_do_receive() above could
5368 * not find vlan dev for vlan id 0.
5370 __vlan_hwaccel_clear_tag(skb
);
5371 skb
= skb_vlan_untag(skb
);
5374 if (vlan_do_receive(&skb
))
5375 /* After stripping off 802.1P header with vlan 0
5376 * vlan dev is found for inner header.
5379 else if (unlikely(!skb
))
5382 /* We have stripped outer 802.1P vlan 0 header.
5383 * But could not find vlan dev.
5384 * check again for vlan id to set OTHERHOST.
5388 /* Note: we might in the future use prio bits
5389 * and set skb->priority like in vlan_do_receive()
5390 * For the time being, just ignore Priority Code Point
5392 __vlan_hwaccel_clear_tag(skb
);
5395 type
= skb
->protocol
;
5397 /* deliver only exact match when indicated */
5398 if (likely(!deliver_exact
)) {
5399 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
5400 &ptype_base
[ntohs(type
) &
5404 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
5405 &orig_dev
->ptype_specific
);
5407 if (unlikely(skb
->dev
!= orig_dev
)) {
5408 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
5409 &skb
->dev
->ptype_specific
);
5413 if (unlikely(skb_orphan_frags_rx(skb
, GFP_ATOMIC
)))
5415 *ppt_prev
= pt_prev
;
5419 atomic_long_inc(&skb
->dev
->rx_dropped
);
5421 atomic_long_inc(&skb
->dev
->rx_nohandler
);
5423 /* Jamal, now you will not able to escape explaining
5424 * me how you were going to use this. :-)
5430 /* The invariant here is that if *ppt_prev is not NULL
5431 * then skb should also be non-NULL.
5433 * Apparently *ppt_prev assignment above holds this invariant due to
5434 * skb dereferencing near it.
5440 static int __netif_receive_skb_one_core(struct sk_buff
*skb
, bool pfmemalloc
)
5442 struct net_device
*orig_dev
= skb
->dev
;
5443 struct packet_type
*pt_prev
= NULL
;
5446 ret
= __netif_receive_skb_core(&skb
, pfmemalloc
, &pt_prev
);
5448 ret
= INDIRECT_CALL_INET(pt_prev
->func
, ipv6_rcv
, ip_rcv
, skb
,
5449 skb
->dev
, pt_prev
, orig_dev
);
5454 * netif_receive_skb_core - special purpose version of netif_receive_skb
5455 * @skb: buffer to process
5457 * More direct receive version of netif_receive_skb(). It should
5458 * only be used by callers that have a need to skip RPS and Generic XDP.
5459 * Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5461 * This function may only be called from softirq context and interrupts
5462 * should be enabled.
5464 * Return values (usually ignored):
5465 * NET_RX_SUCCESS: no congestion
5466 * NET_RX_DROP: packet was dropped
5468 int netif_receive_skb_core(struct sk_buff
*skb
)
5473 ret
= __netif_receive_skb_one_core(skb
, false);
5478 EXPORT_SYMBOL(netif_receive_skb_core
);
5480 static inline void __netif_receive_skb_list_ptype(struct list_head
*head
,
5481 struct packet_type
*pt_prev
,
5482 struct net_device
*orig_dev
)
5484 struct sk_buff
*skb
, *next
;
5488 if (list_empty(head
))
5490 if (pt_prev
->list_func
!= NULL
)
5491 INDIRECT_CALL_INET(pt_prev
->list_func
, ipv6_list_rcv
,
5492 ip_list_rcv
, head
, pt_prev
, orig_dev
);
5494 list_for_each_entry_safe(skb
, next
, head
, list
) {
5495 skb_list_del_init(skb
);
5496 pt_prev
->func(skb
, skb
->dev
, pt_prev
, orig_dev
);
5500 static void __netif_receive_skb_list_core(struct list_head
*head
, bool pfmemalloc
)
5502 /* Fast-path assumptions:
5503 * - There is no RX handler.
5504 * - Only one packet_type matches.
5505 * If either of these fails, we will end up doing some per-packet
5506 * processing in-line, then handling the 'last ptype' for the whole
5507 * sublist. This can't cause out-of-order delivery to any single ptype,
5508 * because the 'last ptype' must be constant across the sublist, and all
5509 * other ptypes are handled per-packet.
5511 /* Current (common) ptype of sublist */
5512 struct packet_type
*pt_curr
= NULL
;
5513 /* Current (common) orig_dev of sublist */
5514 struct net_device
*od_curr
= NULL
;
5515 struct list_head sublist
;
5516 struct sk_buff
*skb
, *next
;
5518 INIT_LIST_HEAD(&sublist
);
5519 list_for_each_entry_safe(skb
, next
, head
, list
) {
5520 struct net_device
*orig_dev
= skb
->dev
;
5521 struct packet_type
*pt_prev
= NULL
;
5523 skb_list_del_init(skb
);
5524 __netif_receive_skb_core(&skb
, pfmemalloc
, &pt_prev
);
5527 if (pt_curr
!= pt_prev
|| od_curr
!= orig_dev
) {
5528 /* dispatch old sublist */
5529 __netif_receive_skb_list_ptype(&sublist
, pt_curr
, od_curr
);
5530 /* start new sublist */
5531 INIT_LIST_HEAD(&sublist
);
5535 list_add_tail(&skb
->list
, &sublist
);
5538 /* dispatch final sublist */
5539 __netif_receive_skb_list_ptype(&sublist
, pt_curr
, od_curr
);
5542 static int __netif_receive_skb(struct sk_buff
*skb
)
5546 if (sk_memalloc_socks() && skb_pfmemalloc(skb
)) {
5547 unsigned int noreclaim_flag
;
5550 * PFMEMALLOC skbs are special, they should
5551 * - be delivered to SOCK_MEMALLOC sockets only
5552 * - stay away from userspace
5553 * - have bounded memory usage
5555 * Use PF_MEMALLOC as this saves us from propagating the allocation
5556 * context down to all allocation sites.
5558 noreclaim_flag
= memalloc_noreclaim_save();
5559 ret
= __netif_receive_skb_one_core(skb
, true);
5560 memalloc_noreclaim_restore(noreclaim_flag
);
5562 ret
= __netif_receive_skb_one_core(skb
, false);
5567 static void __netif_receive_skb_list(struct list_head
*head
)
5569 unsigned long noreclaim_flag
= 0;
5570 struct sk_buff
*skb
, *next
;
5571 bool pfmemalloc
= false; /* Is current sublist PF_MEMALLOC? */
5573 list_for_each_entry_safe(skb
, next
, head
, list
) {
5574 if ((sk_memalloc_socks() && skb_pfmemalloc(skb
)) != pfmemalloc
) {
5575 struct list_head sublist
;
5577 /* Handle the previous sublist */
5578 list_cut_before(&sublist
, head
, &skb
->list
);
5579 if (!list_empty(&sublist
))
5580 __netif_receive_skb_list_core(&sublist
, pfmemalloc
);
5581 pfmemalloc
= !pfmemalloc
;
5582 /* See comments in __netif_receive_skb */
5584 noreclaim_flag
= memalloc_noreclaim_save();
5586 memalloc_noreclaim_restore(noreclaim_flag
);
5589 /* Handle the remaining sublist */
5590 if (!list_empty(head
))
5591 __netif_receive_skb_list_core(head
, pfmemalloc
);
5592 /* Restore pflags */
5594 memalloc_noreclaim_restore(noreclaim_flag
);
5597 static int generic_xdp_install(struct net_device
*dev
, struct netdev_bpf
*xdp
)
5599 struct bpf_prog
*old
= rtnl_dereference(dev
->xdp_prog
);
5600 struct bpf_prog
*new = xdp
->prog
;
5603 switch (xdp
->command
) {
5604 case XDP_SETUP_PROG
:
5605 rcu_assign_pointer(dev
->xdp_prog
, new);
5610 static_branch_dec(&generic_xdp_needed_key
);
5611 } else if (new && !old
) {
5612 static_branch_inc(&generic_xdp_needed_key
);
5613 dev_disable_lro(dev
);
5614 dev_disable_gro_hw(dev
);
5626 static int netif_receive_skb_internal(struct sk_buff
*skb
)
5630 net_timestamp_check(netdev_tstamp_prequeue
, skb
);
5632 if (skb_defer_rx_timestamp(skb
))
5633 return NET_RX_SUCCESS
;
5637 if (static_branch_unlikely(&rps_needed
)) {
5638 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
5639 int cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
5642 ret
= enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
5648 ret
= __netif_receive_skb(skb
);
5653 static void netif_receive_skb_list_internal(struct list_head
*head
)
5655 struct sk_buff
*skb
, *next
;
5656 struct list_head sublist
;
5658 INIT_LIST_HEAD(&sublist
);
5659 list_for_each_entry_safe(skb
, next
, head
, list
) {
5660 net_timestamp_check(netdev_tstamp_prequeue
, skb
);
5661 skb_list_del_init(skb
);
5662 if (!skb_defer_rx_timestamp(skb
))
5663 list_add_tail(&skb
->list
, &sublist
);
5665 list_splice_init(&sublist
, head
);
5669 if (static_branch_unlikely(&rps_needed
)) {
5670 list_for_each_entry_safe(skb
, next
, head
, list
) {
5671 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
5672 int cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
5675 /* Will be handled, remove from list */
5676 skb_list_del_init(skb
);
5677 enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
5682 __netif_receive_skb_list(head
);
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	trace_netif_receive_skb_entry(skb);

	ret = netif_receive_skb_internal(skb);
	trace_netif_receive_skb_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
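
/* Editor's illustrative sketch (not part of this file): unlike netif_rx(),
 * which queues to the per-CPU backlog, netif_receive_skb() runs the RX path
 * synchronously and must be called from softirq context with interrupts
 * enabled, as documented above.  example_dev and example_deliver() are
 * assumptions made for the example.
 */
#if 0
static void example_deliver(struct sk_buff *skb, struct net_device *example_dev)
{
	skb->protocol = eth_type_trans(skb, example_dev);
	netif_receive_skb(skb);		/* fully processed before this call returns */
}
#endif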
/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return;
	if (trace_netif_receive_skb_list_entry_enabled()) {
		list_for_each_entry(skb, head, list)
			trace_netif_receive_skb_list_entry(skb);
	}
	netif_receive_skb_list_internal(head);
	trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
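
/* Editor's illustrative sketch (not part of this file): batching received
 * packets and handing the whole list to netif_receive_skb_list().  The
 * example_* names are assumptions; chaining skbs via skb->list with
 * list_add_tail() and then making one list call is the real pattern used
 * by list-based drivers.  An empty list is a no-op, per the code above.
 */
#if 0
static void example_rx_batch(struct example_ring *ring, int budget)
{
	LIST_HEAD(rx_list);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = example_fetch_skb(ring)) != NULL) {
		list_add_tail(&skb->list, &rx_list);	/* defer delivery, build a batch */
		work++;
	}
	netif_receive_skb_list(&rx_list);		/* deliver the whole batch at once */
}
#endif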
5739 static DEFINE_PER_CPU(struct work_struct
, flush_works
);
5741 /* Network device is going away, flush any packets still pending */
5742 static void flush_backlog(struct work_struct
*work
)
5744 struct sk_buff
*skb
, *tmp
;
5745 struct softnet_data
*sd
;
5748 sd
= this_cpu_ptr(&softnet_data
);
5750 local_irq_disable();
5752 skb_queue_walk_safe(&sd
->input_pkt_queue
, skb
, tmp
) {
5753 if (skb
->dev
->reg_state
== NETREG_UNREGISTERING
) {
5754 __skb_unlink(skb
, &sd
->input_pkt_queue
);
5755 dev_kfree_skb_irq(skb
);
5756 input_queue_head_incr(sd
);
5762 skb_queue_walk_safe(&sd
->process_queue
, skb
, tmp
) {
5763 if (skb
->dev
->reg_state
== NETREG_UNREGISTERING
) {
5764 __skb_unlink(skb
, &sd
->process_queue
);
5766 input_queue_head_incr(sd
);
5772 static bool flush_required(int cpu
)
5774 #if IS_ENABLED(CONFIG_RPS)
5775 struct softnet_data
*sd
= &per_cpu(softnet_data
, cpu
);
5778 local_irq_disable();
5781 /* as insertion into process_queue happens with the rps lock held,
5782 * process_queue access may race only with dequeue
5784 do_flush
= !skb_queue_empty(&sd
->input_pkt_queue
) ||
5785 !skb_queue_empty_lockless(&sd
->process_queue
);
5791 /* without RPS we can't safely check input_pkt_queue: during a
5792 * concurrent remote skb_queue_splice() we can detect as empty both
5793 * input_pkt_queue and process_queue even if the latter could end-up
5794 * containing a lot of packets.
5799 static void flush_all_backlogs(void)
5801 static cpumask_t flush_cpus
;
5804 /* since we are under rtnl lock protection we can use static data
5805 * for the cpumask and avoid allocating on stack the possibly
5812 cpumask_clear(&flush_cpus
);
5813 for_each_online_cpu(cpu
) {
5814 if (flush_required(cpu
)) {
5815 queue_work_on(cpu
, system_highpri_wq
,
5816 per_cpu_ptr(&flush_works
, cpu
));
5817 cpumask_set_cpu(cpu
, &flush_cpus
);
5821 /* we can have in flight packet[s] on the cpus we are not flushing,
5822 * synchronize_net() in unregister_netdevice_many() will take care of
5825 for_each_cpu(cpu
, &flush_cpus
)
5826 flush_work(per_cpu_ptr(&flush_works
, cpu
));
5831 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
5832 static void gro_normal_list(struct napi_struct
*napi
)
5834 if (!napi
->rx_count
)
5836 netif_receive_skb_list_internal(&napi
->rx_list
);
5837 INIT_LIST_HEAD(&napi
->rx_list
);
5841 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
5842 * pass the whole batch up to the stack.
5844 static void gro_normal_one(struct napi_struct
*napi
, struct sk_buff
*skb
, int segs
)
5846 list_add_tail(&skb
->list
, &napi
->rx_list
);
5847 napi
->rx_count
+= segs
;
5848 if (napi
->rx_count
>= gro_normal_batch
)
5849 gro_normal_list(napi
);
5852 static int napi_gro_complete(struct napi_struct
*napi
, struct sk_buff
*skb
)
5854 struct packet_offload
*ptype
;
5855 __be16 type
= skb
->protocol
;
5856 struct list_head
*head
= &offload_base
;
5859 BUILD_BUG_ON(sizeof(struct napi_gro_cb
) > sizeof(skb
->cb
));
5861 if (NAPI_GRO_CB(skb
)->count
== 1) {
5862 skb_shinfo(skb
)->gso_size
= 0;
5867 list_for_each_entry_rcu(ptype
, head
, list
) {
5868 if (ptype
->type
!= type
|| !ptype
->callbacks
.gro_complete
)
5871 err
= INDIRECT_CALL_INET(ptype
->callbacks
.gro_complete
,
5872 ipv6_gro_complete
, inet_gro_complete
,
5879 WARN_ON(&ptype
->list
== head
);
5881 return NET_RX_SUCCESS
;
5885 gro_normal_one(napi
, skb
, NAPI_GRO_CB(skb
)->count
);
5886 return NET_RX_SUCCESS
;
5889 static void __napi_gro_flush_chain(struct napi_struct
*napi
, u32 index
,
5892 struct list_head
*head
= &napi
->gro_hash
[index
].list
;
5893 struct sk_buff
*skb
, *p
;
5895 list_for_each_entry_safe_reverse(skb
, p
, head
, list
) {
5896 if (flush_old
&& NAPI_GRO_CB(skb
)->age
== jiffies
)
5898 skb_list_del_init(skb
);
5899 napi_gro_complete(napi
, skb
);
5900 napi
->gro_hash
[index
].count
--;
5903 if (!napi
->gro_hash
[index
].count
)
5904 __clear_bit(index
, &napi
->gro_bitmask
);
5907 /* napi->gro_hash[].list contains packets ordered by age.
5908 * youngest packets at the head of it.
5909 * Complete skbs in reverse order to reduce latencies.
5911 void napi_gro_flush(struct napi_struct
*napi
, bool flush_old
)
5913 unsigned long bitmask
= napi
->gro_bitmask
;
5914 unsigned int i
, base
= ~0U;
5916 while ((i
= ffs(bitmask
)) != 0) {
5919 __napi_gro_flush_chain(napi
, base
, flush_old
);
5922 EXPORT_SYMBOL(napi_gro_flush
);
5924 static void gro_list_prepare(const struct list_head
*head
,
5925 const struct sk_buff
*skb
)
5927 unsigned int maclen
= skb
->dev
->hard_header_len
;
5928 u32 hash
= skb_get_hash_raw(skb
);
5931 list_for_each_entry(p
, head
, list
) {
5932 unsigned long diffs
;
5934 NAPI_GRO_CB(p
)->flush
= 0;
5936 if (hash
!= skb_get_hash_raw(p
)) {
5937 NAPI_GRO_CB(p
)->same_flow
= 0;
5941 diffs
= (unsigned long)p
->dev
^ (unsigned long)skb
->dev
;
5942 diffs
|= skb_vlan_tag_present(p
) ^ skb_vlan_tag_present(skb
);
5943 if (skb_vlan_tag_present(p
))
5944 diffs
|= skb_vlan_tag_get(p
) ^ skb_vlan_tag_get(skb
);
5945 diffs
|= skb_metadata_differs(p
, skb
);
5946 if (maclen
== ETH_HLEN
)
5947 diffs
|= compare_ether_header(skb_mac_header(p
),
5948 skb_mac_header(skb
));
5950 diffs
= memcmp(skb_mac_header(p
),
5951 skb_mac_header(skb
),
5954 /* in most common scenarions 'slow_gro' is 0
5955 * otherwise we are already on some slower paths
5956 * either skip all the infrequent tests altogether or
5957 * avoid trying too hard to skip each of them individually
5959 if (!diffs
&& unlikely(skb
->slow_gro
| p
->slow_gro
)) {
5960 #if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5961 struct tc_skb_ext
*skb_ext
;
5962 struct tc_skb_ext
*p_ext
;
5965 diffs
|= p
->sk
!= skb
->sk
;
5966 diffs
|= skb_metadata_dst_cmp(p
, skb
);
5967 diffs
|= skb_get_nfct(p
) ^ skb_get_nfct(skb
);
5969 #if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5970 skb_ext
= skb_ext_find(skb
, TC_SKB_EXT
);
5971 p_ext
= skb_ext_find(p
, TC_SKB_EXT
);
5973 diffs
|= (!!p_ext
) ^ (!!skb_ext
);
5974 if (!diffs
&& unlikely(skb_ext
))
5975 diffs
|= p_ext
->chain
^ skb_ext
->chain
;
5979 NAPI_GRO_CB(p
)->same_flow
= !diffs
;
5983 static inline void skb_gro_reset_offset(struct sk_buff
*skb
, u32 nhoff
)
5985 const struct skb_shared_info
*pinfo
= skb_shinfo(skb
);
5986 const skb_frag_t
*frag0
= &pinfo
->frags
[0];
5988 NAPI_GRO_CB(skb
)->data_offset
= 0;
5989 NAPI_GRO_CB(skb
)->frag0
= NULL
;
5990 NAPI_GRO_CB(skb
)->frag0_len
= 0;
5992 if (!skb_headlen(skb
) && pinfo
->nr_frags
&&
5993 !PageHighMem(skb_frag_page(frag0
)) &&
5994 (!NET_IP_ALIGN
|| !((skb_frag_off(frag0
) + nhoff
) & 3))) {
5995 NAPI_GRO_CB(skb
)->frag0
= skb_frag_address(frag0
);
5996 NAPI_GRO_CB(skb
)->frag0_len
= min_t(unsigned int,
5997 skb_frag_size(frag0
),
5998 skb
->end
- skb
->tail
);
6002 static void gro_pull_from_frag0(struct sk_buff
*skb
, int grow
)
6004 struct skb_shared_info
*pinfo
= skb_shinfo(skb
);
6006 BUG_ON(skb
->end
- skb
->tail
< grow
);
6008 memcpy(skb_tail_pointer(skb
), NAPI_GRO_CB(skb
)->frag0
, grow
);
6010 skb
->data_len
-= grow
;
6013 skb_frag_off_add(&pinfo
->frags
[0], grow
);
6014 skb_frag_size_sub(&pinfo
->frags
[0], grow
);
6016 if (unlikely(!skb_frag_size(&pinfo
->frags
[0]))) {
6017 skb_frag_unref(skb
, 0);
6018 memmove(pinfo
->frags
, pinfo
->frags
+ 1,
6019 --pinfo
->nr_frags
* sizeof(pinfo
->frags
[0]));
6023 static void gro_flush_oldest(struct napi_struct
*napi
, struct list_head
*head
)
6025 struct sk_buff
*oldest
;
6027 oldest
= list_last_entry(head
, struct sk_buff
, list
);
6029 /* We are called with head length >= MAX_GRO_SKBS, so this is
6032 if (WARN_ON_ONCE(!oldest
))
6035 /* Do not adjust napi->gro_hash[].count, caller is adding a new
6038 skb_list_del_init(oldest
);
6039 napi_gro_complete(napi
, oldest
);
6042 static enum gro_result
dev_gro_receive(struct napi_struct
*napi
, struct sk_buff
*skb
)
6044 u32 bucket
= skb_get_hash_raw(skb
) & (GRO_HASH_BUCKETS
- 1);
6045 struct gro_list
*gro_list
= &napi
->gro_hash
[bucket
];
6046 struct list_head
*head
= &offload_base
;
6047 struct packet_offload
*ptype
;
6048 __be16 type
= skb
->protocol
;
6049 struct sk_buff
*pp
= NULL
;
6050 enum gro_result ret
;
6054 if (netif_elide_gro(skb
->dev
))
6057 gro_list_prepare(&gro_list
->list
, skb
);
6060 list_for_each_entry_rcu(ptype
, head
, list
) {
6061 if (ptype
->type
!= type
|| !ptype
->callbacks
.gro_receive
)
6064 skb_set_network_header(skb
, skb_gro_offset(skb
));
6065 skb_reset_mac_len(skb
);
6066 NAPI_GRO_CB(skb
)->same_flow
= 0;
6067 NAPI_GRO_CB(skb
)->flush
= skb_is_gso(skb
) || skb_has_frag_list(skb
);
6068 NAPI_GRO_CB(skb
)->free
= 0;
6069 NAPI_GRO_CB(skb
)->encap_mark
= 0;
6070 NAPI_GRO_CB(skb
)->recursion_counter
= 0;
6071 NAPI_GRO_CB(skb
)->is_fou
= 0;
6072 NAPI_GRO_CB(skb
)->is_atomic
= 1;
6073 NAPI_GRO_CB(skb
)->gro_remcsum_start
= 0;
6075 /* Setup for GRO checksum validation */
6076 switch (skb
->ip_summed
) {
6077 case CHECKSUM_COMPLETE
:
6078 NAPI_GRO_CB(skb
)->csum
= skb
->csum
;
6079 NAPI_GRO_CB(skb
)->csum_valid
= 1;
6080 NAPI_GRO_CB(skb
)->csum_cnt
= 0;
6082 case CHECKSUM_UNNECESSARY
:
6083 NAPI_GRO_CB(skb
)->csum_cnt
= skb
->csum_level
+ 1;
6084 NAPI_GRO_CB(skb
)->csum_valid
= 0;
6087 NAPI_GRO_CB(skb
)->csum_cnt
= 0;
6088 NAPI_GRO_CB(skb
)->csum_valid
= 0;
6091 pp
= INDIRECT_CALL_INET(ptype
->callbacks
.gro_receive
,
6092 ipv6_gro_receive
, inet_gro_receive
,
6093 &gro_list
->list
, skb
);
6098 if (&ptype
->list
== head
)
6101 if (PTR_ERR(pp
) == -EINPROGRESS
) {
6106 same_flow
= NAPI_GRO_CB(skb
)->same_flow
;
6107 ret
= NAPI_GRO_CB(skb
)->free
? GRO_MERGED_FREE
: GRO_MERGED
;
6110 skb_list_del_init(pp
);
6111 napi_gro_complete(napi
, pp
);
6118 if (NAPI_GRO_CB(skb
)->flush
)
6121 if (unlikely(gro_list
->count
>= MAX_GRO_SKBS
))
6122 gro_flush_oldest(napi
, &gro_list
->list
);
6126 NAPI_GRO_CB(skb
)->count
= 1;
6127 NAPI_GRO_CB(skb
)->age
= jiffies
;
6128 NAPI_GRO_CB(skb
)->last
= skb
;
6129 skb_shinfo(skb
)->gso_size
= skb_gro_len(skb
);
6130 list_add(&skb
->list
, &gro_list
->list
);
6134 grow
= skb_gro_offset(skb
) - skb_headlen(skb
);
6136 gro_pull_from_frag0(skb
, grow
);
6138 if (gro_list
->count
) {
6139 if (!test_bit(bucket
, &napi
->gro_bitmask
))
6140 __set_bit(bucket
, &napi
->gro_bitmask
);
6141 } else if (test_bit(bucket
, &napi
->gro_bitmask
)) {
6142 __clear_bit(bucket
, &napi
->gro_bitmask
);
6152 struct packet_offload
*gro_find_receive_by_type(__be16 type
)
6154 struct list_head
*offload_head
= &offload_base
;
6155 struct packet_offload
*ptype
;
6157 list_for_each_entry_rcu(ptype
, offload_head
, list
) {
6158 if (ptype
->type
!= type
|| !ptype
->callbacks
.gro_receive
)
6164 EXPORT_SYMBOL(gro_find_receive_by_type
);
6166 struct packet_offload
*gro_find_complete_by_type(__be16 type
)
6168 struct list_head
*offload_head
= &offload_base
;
6169 struct packet_offload
*ptype
;
6171 list_for_each_entry_rcu(ptype
, offload_head
, list
) {
6172 if (ptype
->type
!= type
|| !ptype
->callbacks
.gro_complete
)
6178 EXPORT_SYMBOL(gro_find_complete_by_type
);
6180 static gro_result_t
napi_skb_finish(struct napi_struct
*napi
,
6181 struct sk_buff
*skb
,
6186 gro_normal_one(napi
, skb
, 1);
6189 case GRO_MERGED_FREE
:
6190 if (NAPI_GRO_CB(skb
)->free
== NAPI_GRO_FREE_STOLEN_HEAD
)
6191 napi_skb_free_stolen_head(skb
);
6192 else if (skb
->fclone
!= SKB_FCLONE_UNAVAILABLE
)
6195 __kfree_skb_defer(skb
);
6207 gro_result_t
napi_gro_receive(struct napi_struct
*napi
, struct sk_buff
*skb
)
6211 skb_mark_napi_id(skb
, napi
);
6212 trace_napi_gro_receive_entry(skb
);
6214 skb_gro_reset_offset(skb
, 0);
6216 ret
= napi_skb_finish(napi
, skb
, dev_gro_receive(napi
, skb
));
6217 trace_napi_gro_receive_exit(ret
);
6221 EXPORT_SYMBOL(napi_gro_receive
);
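
/* Editor's illustrative sketch (not part of this file): feeding packets
 * through GRO from a NAPI poll loop.  The example_* names are assumptions;
 * napi_gro_receive() is the real entry point, and any packets it has
 * coalesced are flushed when the NAPI poll completes.
 */
#if 0
static int example_poll_gro(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work;

	for (work = 0; work < budget; work++) {
		struct sk_buff *skb = example_fetch_skb(priv);	/* hypothetical helper */

		if (!skb)
			break;
		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);	/* may merge into an existing GRO flow */
	}
	if (work < budget)
		napi_complete_done(napi, work);
	return work;
}
#endif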
6223 static void napi_reuse_skb(struct napi_struct
*napi
, struct sk_buff
*skb
)
6225 if (unlikely(skb
->pfmemalloc
)) {
6229 __skb_pull(skb
, skb_headlen(skb
));
6230 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
6231 skb_reserve(skb
, NET_SKB_PAD
+ NET_IP_ALIGN
- skb_headroom(skb
));
6232 __vlan_hwaccel_clear_tag(skb
);
6233 skb
->dev
= napi
->dev
;
6236 /* eth_type_trans() assumes pkt_type is PACKET_HOST */
6237 skb
->pkt_type
= PACKET_HOST
;
6239 skb
->encapsulation
= 0;
6240 skb_shinfo(skb
)->gso_type
= 0;
6241 skb
->truesize
= SKB_TRUESIZE(skb_end_offset(skb
));
6242 if (unlikely(skb
->slow_gro
)) {
6252 struct sk_buff
*napi_get_frags(struct napi_struct
*napi
)
6254 struct sk_buff
*skb
= napi
->skb
;
6257 skb
= napi_alloc_skb(napi
, GRO_MAX_HEAD
);
6260 skb_mark_napi_id(skb
, napi
);
6265 EXPORT_SYMBOL(napi_get_frags
);
6267 static gro_result_t
napi_frags_finish(struct napi_struct
*napi
,
6268 struct sk_buff
*skb
,
6274 __skb_push(skb
, ETH_HLEN
);
6275 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
6276 if (ret
== GRO_NORMAL
)
6277 gro_normal_one(napi
, skb
, 1);
6280 case GRO_MERGED_FREE
:
6281 if (NAPI_GRO_CB(skb
)->free
== NAPI_GRO_FREE_STOLEN_HEAD
)
6282 napi_skb_free_stolen_head(skb
);
6284 napi_reuse_skb(napi
, skb
);
6295 /* Upper GRO stack assumes network header starts at gro_offset=0
6296 * Drivers could call both napi_gro_frags() and napi_gro_receive()
6297 * We copy ethernet header into skb->data to have a common layout.
6299 static struct sk_buff
*napi_frags_skb(struct napi_struct
*napi
)
6301 struct sk_buff
*skb
= napi
->skb
;
6302 const struct ethhdr
*eth
;
6303 unsigned int hlen
= sizeof(*eth
);
6307 skb_reset_mac_header(skb
);
6308 skb_gro_reset_offset(skb
, hlen
);
6310 if (unlikely(skb_gro_header_hard(skb
, hlen
))) {
6311 eth
= skb_gro_header_slow(skb
, hlen
, 0);
6312 if (unlikely(!eth
)) {
6313 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
6314 __func__
, napi
->dev
->name
);
6315 napi_reuse_skb(napi
, skb
);
6319 eth
= (const struct ethhdr
*)skb
->data
;
6320 gro_pull_from_frag0(skb
, hlen
);
6321 NAPI_GRO_CB(skb
)->frag0
+= hlen
;
6322 NAPI_GRO_CB(skb
)->frag0_len
-= hlen
;
6324 __skb_pull(skb
, hlen
);
6327 * This works because the only protocols we care about don't require
6329 * We'll fix it up properly in napi_frags_finish()
6331 skb
->protocol
= eth
->h_proto
;
6336 gro_result_t
napi_gro_frags(struct napi_struct
*napi
)
6339 struct sk_buff
*skb
= napi_frags_skb(napi
);
6341 trace_napi_gro_frags_entry(skb
);
6343 ret
= napi_frags_finish(napi
, skb
, dev_gro_receive(napi
, skb
));
6344 trace_napi_gro_frags_exit(ret
);
6348 EXPORT_SYMBOL(napi_gro_frags
);
6350 /* Compute the checksum from gro_offset and return the folded value
6351 * after adding in any pseudo checksum.
6353 __sum16
__skb_gro_checksum_complete(struct sk_buff
*skb
)
6358 wsum
= skb_checksum(skb
, skb_gro_offset(skb
), skb_gro_len(skb
), 0);
6360 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
6361 sum
= csum_fold(csum_add(NAPI_GRO_CB(skb
)->csum
, wsum
));
6362 /* See comments in __skb_checksum_complete(). */
6364 if (unlikely(skb
->ip_summed
== CHECKSUM_COMPLETE
) &&
6365 !skb
->csum_complete_sw
)
6366 netdev_rx_csum_fault(skb
->dev
, skb
);
6369 NAPI_GRO_CB(skb
)->csum
= wsum
;
6370 NAPI_GRO_CB(skb
)->csum_valid
= 1;
6374 EXPORT_SYMBOL(__skb_gro_checksum_complete
);
6376 static void net_rps_send_ipi(struct softnet_data
*remsd
)
6380 struct softnet_data
*next
= remsd
->rps_ipi_next
;
6382 if (cpu_online(remsd
->cpu
))
6383 smp_call_function_single_async(remsd
->cpu
, &remsd
->csd
);
6390 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
6391 * Note: called with local irq disabled, but exits with local irq enabled.
6393 static void net_rps_action_and_irq_enable(struct softnet_data
*sd
)
6396 struct softnet_data
*remsd
= sd
->rps_ipi_list
;
6399 sd
->rps_ipi_list
= NULL
;
6403 /* Send pending IPI's to kick RPS processing on remote cpus. */
6404 net_rps_send_ipi(remsd
);
6410 static bool sd_has_rps_ipi_waiting(struct softnet_data
*sd
)
6413 return sd
->rps_ipi_list
!= NULL
;
6419 static int process_backlog(struct napi_struct
*napi
, int quota
)
6421 struct softnet_data
*sd
= container_of(napi
, struct softnet_data
, backlog
);
6425 /* Check if we have pending ipi, its better to send them now,
6426 * not waiting net_rx_action() end.
6428 if (sd_has_rps_ipi_waiting(sd
)) {
6429 local_irq_disable();
6430 net_rps_action_and_irq_enable(sd
);
6433 napi
->weight
= dev_rx_weight
;
6435 struct sk_buff
*skb
;
6437 while ((skb
= __skb_dequeue(&sd
->process_queue
))) {
6439 __netif_receive_skb(skb
);
6441 input_queue_head_incr(sd
);
6442 if (++work
>= quota
)
6447 local_irq_disable();
6449 if (skb_queue_empty(&sd
->input_pkt_queue
)) {
6451 * Inline a custom version of __napi_complete().
6452 * only current cpu owns and manipulates this napi,
6453 * and NAPI_STATE_SCHED is the only possible flag set
6455 * We can use a plain write instead of clear_bit(),
6456 * and we dont need an smp_mb() memory barrier.
6461 skb_queue_splice_tail_init(&sd
->input_pkt_queue
,
6462 &sd
->process_queue
);
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
6523 * @n: entry to schedule
6525 * Variant of __napi_schedule() assuming hard irqs are masked.
6527 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6528 * because the interrupt disabled assumption might not be true
6529 * due to force-threaded interrupts and spinlock substitution.
6531 void __napi_schedule_irqoff(struct napi_struct
*n
)
6533 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
6534 ____napi_schedule(this_cpu_ptr(&softnet_data
), n
);
6538 EXPORT_SYMBOL(__napi_schedule_irqoff
);
6540 bool napi_complete_done(struct napi_struct
*n
, int work_done
)
6542 unsigned long flags
, val
, new, timeout
= 0;
6546 * 1) Don't let napi dequeue from the cpu poll list
6547 * just in case its running on a different cpu.
6548 * 2) If we are busy polling, do nothing here, we have
6549 * the guarantee we will be called later.
6551 if (unlikely(n
->state
& (NAPIF_STATE_NPSVC
|
6552 NAPIF_STATE_IN_BUSY_POLL
)))
6557 timeout
= READ_ONCE(n
->dev
->gro_flush_timeout
);
6558 n
->defer_hard_irqs_count
= READ_ONCE(n
->dev
->napi_defer_hard_irqs
);
6560 if (n
->defer_hard_irqs_count
> 0) {
6561 n
->defer_hard_irqs_count
--;
6562 timeout
= READ_ONCE(n
->dev
->gro_flush_timeout
);
6566 if (n
->gro_bitmask
) {
6567 /* When the NAPI instance uses a timeout and keeps postponing
6568 * it, we need to bound somehow the time packets are kept in
6571 napi_gro_flush(n
, !!timeout
);
6576 if (unlikely(!list_empty(&n
->poll_list
))) {
6577 /* If n->poll_list is not empty, we need to mask irqs */
6578 local_irq_save(flags
);
6579 list_del_init(&n
->poll_list
);
6580 local_irq_restore(flags
);
6584 val
= READ_ONCE(n
->state
);
6586 WARN_ON_ONCE(!(val
& NAPIF_STATE_SCHED
));
6588 new = val
& ~(NAPIF_STATE_MISSED
| NAPIF_STATE_SCHED
|
6589 NAPIF_STATE_SCHED_THREADED
|
6590 NAPIF_STATE_PREFER_BUSY_POLL
);
6592 /* If STATE_MISSED was set, leave STATE_SCHED set,
6593 * because we will call napi->poll() one more time.
6594 * This C code was suggested by Alexander Duyck to help gcc.
6596 new |= (val
& NAPIF_STATE_MISSED
) / NAPIF_STATE_MISSED
*
6598 } while (cmpxchg(&n
->state
, val
, new) != val
);
6600 if (unlikely(val
& NAPIF_STATE_MISSED
)) {
6606 hrtimer_start(&n
->timer
, ns_to_ktime(timeout
),
6607 HRTIMER_MODE_REL_PINNED
);
6610 EXPORT_SYMBOL(napi_complete_done
);
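
/* Editor's illustrative sketch (not part of this file): the tail of a
 * driver's poll() callback.  Re-enabling the device interrupt only when
 * napi_complete_done() returns true respects busy polling and the
 * gro_flush_timeout deferral handled above.  example_unmask_rx_irq() and
 * priv are assumptions made for the example.
 */
#if 0
	if (work_done < budget &&
	    napi_complete_done(napi, work_done))
		example_unmask_rx_irq(priv);	/* hypothetical irq re-enable */
	return work_done;
#endif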
6612 /* must be called under rcu_read_lock(), as we dont take a reference */
6613 static struct napi_struct
*napi_by_id(unsigned int napi_id
)
6615 unsigned int hash
= napi_id
% HASH_SIZE(napi_hash
);
6616 struct napi_struct
*napi
;
6618 hlist_for_each_entry_rcu(napi
, &napi_hash
[hash
], napi_hash_node
)
6619 if (napi
->napi_id
== napi_id
)
6625 #if defined(CONFIG_NET_RX_BUSY_POLL)
6627 static void __busy_poll_stop(struct napi_struct
*napi
, bool skip_schedule
)
6629 if (!skip_schedule
) {
6630 gro_normal_list(napi
);
6631 __napi_schedule(napi
);
6635 if (napi
->gro_bitmask
) {
6636 /* flush too old packets
6637 * If HZ < 1000, flush all packets.
6639 napi_gro_flush(napi
, HZ
>= 1000);
6642 gro_normal_list(napi
);
6643 clear_bit(NAPI_STATE_SCHED
, &napi
->state
);
6646 static void busy_poll_stop(struct napi_struct
*napi
, void *have_poll_lock
, bool prefer_busy_poll
,
6649 bool skip_schedule
= false;
6650 unsigned long timeout
;
6653 /* Busy polling means there is a high chance device driver hard irq
6654 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6655 * set in napi_schedule_prep().
6656 * Since we are about to call napi->poll() once more, we can safely
6657 * clear NAPI_STATE_MISSED.
6659 * Note: x86 could use a single "lock and ..." instruction
6660 * to perform these two clear_bit()
6662 clear_bit(NAPI_STATE_MISSED
, &napi
->state
);
6663 clear_bit(NAPI_STATE_IN_BUSY_POLL
, &napi
->state
);
6667 if (prefer_busy_poll
) {
6668 napi
->defer_hard_irqs_count
= READ_ONCE(napi
->dev
->napi_defer_hard_irqs
);
6669 timeout
= READ_ONCE(napi
->dev
->gro_flush_timeout
);
6670 if (napi
->defer_hard_irqs_count
&& timeout
) {
6671 hrtimer_start(&napi
->timer
, ns_to_ktime(timeout
), HRTIMER_MODE_REL_PINNED
);
6672 skip_schedule
= true;
6676 /* All we really want here is to re-enable device interrupts.
6677 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6679 rc
= napi
->poll(napi
, budget
);
6680 /* We can't gro_normal_list() here, because napi->poll() might have
6681 * rearmed the napi (napi_complete_done()) in which case it could
6682 * already be running on another CPU.
6684 trace_napi_poll(napi
, rc
, budget
);
6685 netpoll_poll_unlock(have_poll_lock
);
6687 __busy_poll_stop(napi
, skip_schedule
);
6691 void napi_busy_loop(unsigned int napi_id
,
6692 bool (*loop_end
)(void *, unsigned long),
6693 void *loop_end_arg
, bool prefer_busy_poll
, u16 budget
)
6695 unsigned long start_time
= loop_end
? busy_loop_current_time() : 0;
6696 int (*napi_poll
)(struct napi_struct
*napi
, int budget
);
6697 void *have_poll_lock
= NULL
;
6698 struct napi_struct
*napi
;
6705 napi
= napi_by_id(napi_id
);
6715 unsigned long val
= READ_ONCE(napi
->state
);
6717 /* If multiple threads are competing for this napi,
6718 * we avoid dirtying napi->state as much as we can.
6720 if (val
& (NAPIF_STATE_DISABLE
| NAPIF_STATE_SCHED
|
6721 NAPIF_STATE_IN_BUSY_POLL
)) {
6722 if (prefer_busy_poll
)
6723 set_bit(NAPI_STATE_PREFER_BUSY_POLL
, &napi
->state
);
6726 if (cmpxchg(&napi
->state
, val
,
6727 val
| NAPIF_STATE_IN_BUSY_POLL
|
6728 NAPIF_STATE_SCHED
) != val
) {
6729 if (prefer_busy_poll
)
6730 set_bit(NAPI_STATE_PREFER_BUSY_POLL
, &napi
->state
);
6733 have_poll_lock
= netpoll_poll_lock(napi
);
6734 napi_poll
= napi
->poll
;
6736 work
= napi_poll(napi
, budget
);
6737 trace_napi_poll(napi
, work
, budget
);
6738 gro_normal_list(napi
);
6741 __NET_ADD_STATS(dev_net(napi
->dev
),
6742 LINUX_MIB_BUSYPOLLRXPACKETS
, work
);
6745 if (!loop_end
|| loop_end(loop_end_arg
, start_time
))
6748 if (unlikely(need_resched())) {
6750 busy_poll_stop(napi
, have_poll_lock
, prefer_busy_poll
, budget
);
6754 if (loop_end(loop_end_arg
, start_time
))
6761 busy_poll_stop(napi
, have_poll_lock
, prefer_busy_poll
, budget
);
6766 EXPORT_SYMBOL(napi_busy_loop
);
6768 #endif /* CONFIG_NET_RX_BUSY_POLL */
6770 static void napi_hash_add(struct napi_struct
*napi
)
6772 if (test_bit(NAPI_STATE_NO_BUSY_POLL
, &napi
->state
))
6775 spin_lock(&napi_hash_lock
);
6777 /* 0..NR_CPUS range is reserved for sender_cpu use */
6779 if (unlikely(++napi_gen_id
< MIN_NAPI_ID
))
6780 napi_gen_id
= MIN_NAPI_ID
;
6781 } while (napi_by_id(napi_gen_id
));
6782 napi
->napi_id
= napi_gen_id
;
6784 hlist_add_head_rcu(&napi
->napi_hash_node
,
6785 &napi_hash
[napi
->napi_id
% HASH_SIZE(napi_hash
)]);
6787 spin_unlock(&napi_hash_lock
);
6790 /* Warning : caller is responsible to make sure rcu grace period
6791 * is respected before freeing memory containing @napi
6793 static void napi_hash_del(struct napi_struct
*napi
)
6795 spin_lock(&napi_hash_lock
);
6797 hlist_del_init_rcu(&napi
->napi_hash_node
);
6799 spin_unlock(&napi_hash_lock
);
6802 static enum hrtimer_restart
napi_watchdog(struct hrtimer
*timer
)
6804 struct napi_struct
*napi
;
6806 napi
= container_of(timer
, struct napi_struct
, timer
);
6808 /* Note : we use a relaxed variant of napi_schedule_prep() not setting
6809 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6811 if (!napi_disable_pending(napi
) &&
6812 !test_and_set_bit(NAPI_STATE_SCHED
, &napi
->state
)) {
6813 clear_bit(NAPI_STATE_PREFER_BUSY_POLL
, &napi
->state
);
6814 __napi_schedule_irqoff(napi
);
6817 return HRTIMER_NORESTART
;
6820 static void init_gro_hash(struct napi_struct
*napi
)
6824 for (i
= 0; i
< GRO_HASH_BUCKETS
; i
++) {
6825 INIT_LIST_HEAD(&napi
->gro_hash
[i
].list
);
6826 napi
->gro_hash
[i
].count
= 0;
6828 napi
->gro_bitmask
= 0;
6831 int dev_set_threaded(struct net_device
*dev
, bool threaded
)
6833 struct napi_struct
*napi
;
6836 if (dev
->threaded
== threaded
)
6840 list_for_each_entry(napi
, &dev
->napi_list
, dev_list
) {
6841 if (!napi
->thread
) {
6842 err
= napi_kthread_create(napi
);
6851 dev
->threaded
= threaded
;
6853 /* Make sure kthread is created before THREADED bit
6856 smp_mb__before_atomic();
6858 /* Setting/unsetting threaded mode on a napi might not immediately
6859 * take effect, if the current napi instance is actively being
6860 * polled. In this case, the switch between threaded mode and
6861 * softirq mode will happen in the next round of napi_schedule().
6862 * This should not cause hiccups/stalls to the live traffic.
6864 list_for_each_entry(napi
, &dev
->napi_list
, dev_list
) {
6866 set_bit(NAPI_STATE_THREADED
, &napi
->state
);
6868 clear_bit(NAPI_STATE_THREADED
, &napi
->state
);
6873 EXPORT_SYMBOL(dev_set_threaded
);
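
/* Editor's illustrative sketch (not part of this file): switching a device's
 * NAPI instances to threaded mode from kernel code.  Userspace normally
 * toggles this through the per-device "threaded" sysfs attribute, which ends
 * up in the same call; that path runs under rtnl_lock(), so the sketch takes
 * it too.  The example_* name is an assumption.
 */
#if 0
static int example_enable_threaded_napi(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_threaded(dev, true);	/* creates one kthread per NAPI instance */
	rtnl_unlock();
	return err;
}
#endif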
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
		return;

	INIT_LIST_HEAD(&napi->poll_list);
	INIT_HLIST_NODE(&napi->napi_hash_node);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	init_gro_hash(napi);
	napi->skb = NULL;
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
				weight);
	napi->weight = weight;
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	set_bit(NAPI_STATE_NPSVC, &napi->state);
	list_add_rcu(&napi->dev_list, &dev->napi_list);
	napi_hash_add(napi);
	/* Create kthread for this napi if dev->threaded is set.
	 * Clear dev->threaded if kthread creation failed so that
	 * threaded mode will not be enabled in napi_enable().
	 */
	if (dev->threaded && napi_kthread_create(napi))
		dev->threaded = 0;
}
EXPORT_SYMBOL(netif_napi_add);
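
/* Editor's illustrative sketch (not part of this file): typical probe-time
 * NAPI setup pairing netif_napi_add() with napi_enable() once the device is
 * ready.  example_priv and example_poll are assumptions; NAPI_POLL_WEIGHT is
 * the conventional poll weight.
 */
#if 0
static int example_setup_napi(struct net_device *dev, struct example_priv *priv)
{
	netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
	/* ... allocate rings, request the IRQ ... */
	napi_enable(&priv->napi);	/* clears SCHED/NPSVC set by netif_napi_add() */
	return 0;
}
#endif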
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
	clear_bit(NAPI_STATE_THREADED, &n->state);
}
EXPORT_SYMBOL(napi_disable);
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
void napi_enable(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));

		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
		if (n->dev->threaded && n->thread)
			new |= NAPIF_STATE_THREADED;
	} while (cmpxchg(&n->state, val, new) != val);
}
EXPORT_SYMBOL(napi_enable);
6951 static void flush_gro_hash(struct napi_struct
*napi
)
6955 for (i
= 0; i
< GRO_HASH_BUCKETS
; i
++) {
6956 struct sk_buff
*skb
, *n
;
6958 list_for_each_entry_safe(skb
, n
, &napi
->gro_hash
[i
].list
, list
)
6960 napi
->gro_hash
[i
].count
= 0;
6964 /* Must be called in process context */
6965 void __netif_napi_del(struct napi_struct
*napi
)
6967 if (!test_and_clear_bit(NAPI_STATE_LISTED
, &napi
->state
))
6970 napi_hash_del(napi
);
6971 list_del_rcu(&napi
->dev_list
);
6972 napi_free_frags(napi
);
6974 flush_gro_hash(napi
);
6975 napi
->gro_bitmask
= 0;
6978 kthread_stop(napi
->thread
);
6979 napi
->thread
= NULL
;
6982 EXPORT_SYMBOL(__netif_napi_del
);
static int __napi_poll(struct napi_struct *n, bool *repoll)
{
	int work, weight;

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	if (unlikely(work > weight))
		pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
			    n->poll, work, weight);

	if (likely(work < weight))
		return work;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		return work;
	}

	/* The NAPI context has more processing work, but busy-polling
	 * is preferred. Exit early.
	 */
	if (napi_prefer_busy_poll(n)) {
		if (napi_complete_done(n, work)) {
			/* If timeout is not set, we need to make sure
			 * that the NAPI is re-scheduled.
			 */
			napi_schedule(n);
		}
		return work;
	}

	if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	gro_normal_list(n);

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		return work;
	}

	*repoll = true;

	return work;
}
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	bool do_repoll = false;
	void *have;
	int work;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	work = __napi_poll(n, &do_repoll);

	if (do_repoll)
		list_add_tail(&n->poll_list, repoll);

	netpoll_poll_unlock(have);

	return work;
}
static int napi_thread_wait(struct napi_struct *napi)
{
	bool woken = false;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		/* Testing SCHED_THREADED bit here to make sure the current
		 * kthread owns this napi and could poll on this napi.
		 * Testing SCHED bit is not enough because SCHED bit might be
		 * set by some other busy poll thread or by napi_disable().
		 */
		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
			WARN_ON(!list_empty(&napi->poll_list));
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		schedule();
		/* woken being true indicates this thread owns this napi. */
		woken = true;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return -1;
}
static int napi_threaded_poll(void *data)
{
	struct napi_struct *napi = data;
	void *have;

	while (!napi_thread_wait(napi)) {
		for (;;) {
			bool repoll = false;

			local_bh_disable();

			have = netpoll_poll_lock(napi);
			__napi_poll(napi, &repoll);
			netpoll_poll_unlock(have);

			local_bh_enable();

			if (!repoll)
				break;

			cond_resched();
		}
	}
	return 0;
}
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				return;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
}
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* lookup ignore flag */
	bool ignore;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}
static int ____netdev_has_upper_dev(struct net_device *upper_dev,
				    struct netdev_nested_priv *priv)
{
	struct net_device *dev = (struct net_device *)priv->data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					     &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
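/* Illustrative sketch, not part of the original file: a typical RTNL-held
 * caller uses netdev_has_upper_dev() to refuse stacking a device under an
 * upper it is already linked to. The helper name is an assumption.
 */
static inline bool example_already_linked(struct net_device *dev,
					  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_has_upper_dev(dev, upper_dev);
}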
/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */

bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					       &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master) && !upper->ignore)
		return upper->dev;
	return NULL;
}

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}
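/* Illustrative sketch, not part of the original file: bonding/team style code
 * asks under RTNL whether a device already has a master upper before trying
 * to enslave it. The helper name is an assumption.
 */
static inline bool example_is_enslaved(struct net_device *slave)
{
	ASSERT_RTNL();

	return netdev_master_upper_dev_get(slave) != NULL;
}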
7329 void *netdev_adjacent_get_private(struct list_head
*adj_list
)
7331 struct netdev_adjacent
*adj
;
7333 adj
= list_entry(adj_list
, struct netdev_adjacent
, list
);
7335 return adj
->private;
7337 EXPORT_SYMBOL(netdev_adjacent_get_private
);
7340 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7342 * @iter: list_head ** of the current position
7344 * Gets the next device from the dev's upper list, starting from iter
7345 * position. The caller must hold RCU read lock.
7347 struct net_device
*netdev_upper_get_next_dev_rcu(struct net_device
*dev
,
7348 struct list_head
**iter
)
7350 struct netdev_adjacent
*upper
;
7352 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7354 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7356 if (&upper
->list
== &dev
->adj_list
.upper
)
7359 *iter
= &upper
->list
;
7363 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu
);
7365 static struct net_device
*__netdev_next_upper_dev(struct net_device
*dev
,
7366 struct list_head
**iter
,
7369 struct netdev_adjacent
*upper
;
7371 upper
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
7373 if (&upper
->list
== &dev
->adj_list
.upper
)
7376 *iter
= &upper
->list
;
7377 *ignore
= upper
->ignore
;
7382 static struct net_device
*netdev_next_upper_dev_rcu(struct net_device
*dev
,
7383 struct list_head
**iter
)
7385 struct netdev_adjacent
*upper
;
7387 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7389 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7391 if (&upper
->list
== &dev
->adj_list
.upper
)
7394 *iter
= &upper
->list
;
7399 static int __netdev_walk_all_upper_dev(struct net_device
*dev
,
7400 int (*fn
)(struct net_device
*dev
,
7401 struct netdev_nested_priv
*priv
),
7402 struct netdev_nested_priv
*priv
)
7404 struct net_device
*udev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7405 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7410 iter
= &dev
->adj_list
.upper
;
7414 ret
= fn(now
, priv
);
7421 udev
= __netdev_next_upper_dev(now
, &iter
, &ignore
);
7428 niter
= &udev
->adj_list
.upper
;
7429 dev_stack
[cur
] = now
;
7430 iter_stack
[cur
++] = iter
;
7437 next
= dev_stack
[--cur
];
7438 niter
= iter_stack
[cur
];
7448 int netdev_walk_all_upper_dev_rcu(struct net_device
*dev
,
7449 int (*fn
)(struct net_device
*dev
,
7450 struct netdev_nested_priv
*priv
),
7451 struct netdev_nested_priv
*priv
)
7453 struct net_device
*udev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7454 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7458 iter
= &dev
->adj_list
.upper
;
7462 ret
= fn(now
, priv
);
7469 udev
= netdev_next_upper_dev_rcu(now
, &iter
);
7474 niter
= &udev
->adj_list
.upper
;
7475 dev_stack
[cur
] = now
;
7476 iter_stack
[cur
++] = iter
;
7483 next
= dev_stack
[--cur
];
7484 niter
= iter_stack
[cur
];
7493 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu
);
7495 static bool __netdev_has_upper_dev(struct net_device
*dev
,
7496 struct net_device
*upper_dev
)
7498 struct netdev_nested_priv priv
= {
7500 .data
= (void *)upper_dev
,
7505 return __netdev_walk_all_upper_dev(dev
, ____netdev_has_upper_dev
,
7510 * netdev_lower_get_next_private - Get the next ->private from the
7511 * lower neighbour list
7513 * @iter: list_head ** of the current position
7515 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7516 * list, starting from iter position. The caller must hold either hold the
7517 * RTNL lock or its own locking that guarantees that the neighbour lower
7518 * list will remain unchanged.
7520 void *netdev_lower_get_next_private(struct net_device
*dev
,
7521 struct list_head
**iter
)
7523 struct netdev_adjacent
*lower
;
7525 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
7527 if (&lower
->list
== &dev
->adj_list
.lower
)
7530 *iter
= lower
->list
.next
;
7532 return lower
->private;
7534 EXPORT_SYMBOL(netdev_lower_get_next_private
);
7537 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7538 * lower neighbour list, RCU
7541 * @iter: list_head ** of the current position
7543 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7544 * list, starting from iter position. The caller must hold RCU read lock.
7546 void *netdev_lower_get_next_private_rcu(struct net_device
*dev
,
7547 struct list_head
**iter
)
7549 struct netdev_adjacent
*lower
;
7551 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7553 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7555 if (&lower
->list
== &dev
->adj_list
.lower
)
7558 *iter
= &lower
->list
;
7560 return lower
->private;
7562 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu
);
7565 * netdev_lower_get_next - Get the next device from the lower neighbour
7568 * @iter: list_head ** of the current position
7570 * Gets the next netdev_adjacent from the dev's lower neighbour
7571 * list, starting from iter position. The caller must hold RTNL lock or
7572 * its own locking that guarantees that the neighbour lower
7573 * list will remain unchanged.
7575 void *netdev_lower_get_next(struct net_device
*dev
, struct list_head
**iter
)
7577 struct netdev_adjacent
*lower
;
7579 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
7581 if (&lower
->list
== &dev
->adj_list
.lower
)
7584 *iter
= lower
->list
.next
;
7588 EXPORT_SYMBOL(netdev_lower_get_next
);
7590 static struct net_device
*netdev_next_lower_dev(struct net_device
*dev
,
7591 struct list_head
**iter
)
7593 struct netdev_adjacent
*lower
;
7595 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
7597 if (&lower
->list
== &dev
->adj_list
.lower
)
7600 *iter
= &lower
->list
;
7605 static struct net_device
*__netdev_next_lower_dev(struct net_device
*dev
,
7606 struct list_head
**iter
,
7609 struct netdev_adjacent
*lower
;
7611 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
7613 if (&lower
->list
== &dev
->adj_list
.lower
)
7616 *iter
= &lower
->list
;
7617 *ignore
= lower
->ignore
;
7622 int netdev_walk_all_lower_dev(struct net_device
*dev
,
7623 int (*fn
)(struct net_device
*dev
,
7624 struct netdev_nested_priv
*priv
),
7625 struct netdev_nested_priv
*priv
)
7627 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7628 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7632 iter
= &dev
->adj_list
.lower
;
7636 ret
= fn(now
, priv
);
7643 ldev
= netdev_next_lower_dev(now
, &iter
);
7648 niter
= &ldev
->adj_list
.lower
;
7649 dev_stack
[cur
] = now
;
7650 iter_stack
[cur
++] = iter
;
7657 next
= dev_stack
[--cur
];
7658 niter
= iter_stack
[cur
];
7667 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev
);
7669 static int __netdev_walk_all_lower_dev(struct net_device
*dev
,
7670 int (*fn
)(struct net_device
*dev
,
7671 struct netdev_nested_priv
*priv
),
7672 struct netdev_nested_priv
*priv
)
7674 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7675 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7680 iter
= &dev
->adj_list
.lower
;
7684 ret
= fn(now
, priv
);
7691 ldev
= __netdev_next_lower_dev(now
, &iter
, &ignore
);
7698 niter
= &ldev
->adj_list
.lower
;
7699 dev_stack
[cur
] = now
;
7700 iter_stack
[cur
++] = iter
;
7707 next
= dev_stack
[--cur
];
7708 niter
= iter_stack
[cur
];
7718 struct net_device
*netdev_next_lower_dev_rcu(struct net_device
*dev
,
7719 struct list_head
**iter
)
7721 struct netdev_adjacent
*lower
;
7723 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7724 if (&lower
->list
== &dev
->adj_list
.lower
)
7727 *iter
= &lower
->list
;
7731 EXPORT_SYMBOL(netdev_next_lower_dev_rcu
);
7733 static u8
__netdev_upper_depth(struct net_device
*dev
)
7735 struct net_device
*udev
;
7736 struct list_head
*iter
;
7740 for (iter
= &dev
->adj_list
.upper
,
7741 udev
= __netdev_next_upper_dev(dev
, &iter
, &ignore
);
7743 udev
= __netdev_next_upper_dev(dev
, &iter
, &ignore
)) {
7746 if (max_depth
< udev
->upper_level
)
7747 max_depth
= udev
->upper_level
;
7753 static u8
__netdev_lower_depth(struct net_device
*dev
)
7755 struct net_device
*ldev
;
7756 struct list_head
*iter
;
7760 for (iter
= &dev
->adj_list
.lower
,
7761 ldev
= __netdev_next_lower_dev(dev
, &iter
, &ignore
);
7763 ldev
= __netdev_next_lower_dev(dev
, &iter
, &ignore
)) {
7766 if (max_depth
< ldev
->lower_level
)
7767 max_depth
= ldev
->lower_level
;
7773 static int __netdev_update_upper_level(struct net_device
*dev
,
7774 struct netdev_nested_priv
*__unused
)
7776 dev
->upper_level
= __netdev_upper_depth(dev
) + 1;
7780 static int __netdev_update_lower_level(struct net_device
*dev
,
7781 struct netdev_nested_priv
*priv
)
7783 dev
->lower_level
= __netdev_lower_depth(dev
) + 1;
7785 #ifdef CONFIG_LOCKDEP
7789 if (priv
->flags
& NESTED_SYNC_IMM
)
7790 dev
->nested_level
= dev
->lower_level
- 1;
7791 if (priv
->flags
& NESTED_SYNC_TODO
)
7792 net_unlink_todo(dev
);
7797 int netdev_walk_all_lower_dev_rcu(struct net_device
*dev
,
7798 int (*fn
)(struct net_device
*dev
,
7799 struct netdev_nested_priv
*priv
),
7800 struct netdev_nested_priv
*priv
)
7802 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7803 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7807 iter
= &dev
->adj_list
.lower
;
7811 ret
= fn(now
, priv
);
7818 ldev
= netdev_next_lower_dev_rcu(now
, &iter
);
7823 niter
= &ldev
->adj_list
.lower
;
7824 dev_stack
[cur
] = now
;
7825 iter_stack
[cur
++] = iter
;
7832 next
= dev_stack
[--cur
];
7833 niter
= iter_stack
[cur
];
7842 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu
);
7845 * netdev_lower_get_first_private_rcu - Get the first ->private from the
7846 * lower neighbour list, RCU
7850 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7851 * list. The caller must hold RCU read lock.
7853 void *netdev_lower_get_first_private_rcu(struct net_device
*dev
)
7855 struct netdev_adjacent
*lower
;
7857 lower
= list_first_or_null_rcu(&dev
->adj_list
.lower
,
7858 struct netdev_adjacent
, list
);
7860 return lower
->private;
7863 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu
);
7866 * netdev_master_upper_dev_get_rcu - Get master upper device
7869 * Find a master upper device and return pointer to it or NULL in case
7870 * it's not there. The caller must hold the RCU read lock.
7872 struct net_device
*netdev_master_upper_dev_get_rcu(struct net_device
*dev
)
7874 struct netdev_adjacent
*upper
;
7876 upper
= list_first_or_null_rcu(&dev
->adj_list
.upper
,
7877 struct netdev_adjacent
, list
);
7878 if (upper
&& likely(upper
->master
))
7882 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu
);
7884 static int netdev_adjacent_sysfs_add(struct net_device
*dev
,
7885 struct net_device
*adj_dev
,
7886 struct list_head
*dev_list
)
7888 char linkname
[IFNAMSIZ
+7];
7890 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
7891 "upper_%s" : "lower_%s", adj_dev
->name
);
7892 return sysfs_create_link(&(dev
->dev
.kobj
), &(adj_dev
->dev
.kobj
),
7895 static void netdev_adjacent_sysfs_del(struct net_device
*dev
,
7897 struct list_head
*dev_list
)
7899 char linkname
[IFNAMSIZ
+7];
7901 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
7902 "upper_%s" : "lower_%s", name
);
7903 sysfs_remove_link(&(dev
->dev
.kobj
), linkname
);
7906 static inline bool netdev_adjacent_is_neigh_list(struct net_device
*dev
,
7907 struct net_device
*adj_dev
,
7908 struct list_head
*dev_list
)
7910 return (dev_list
== &dev
->adj_list
.upper
||
7911 dev_list
== &dev
->adj_list
.lower
) &&
7912 net_eq(dev_net(dev
), dev_net(adj_dev
));
7915 static int __netdev_adjacent_dev_insert(struct net_device
*dev
,
7916 struct net_device
*adj_dev
,
7917 struct list_head
*dev_list
,
7918 void *private, bool master
)
7920 struct netdev_adjacent
*adj
;
7923 adj
= __netdev_find_adj(adj_dev
, dev_list
);
7927 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7928 dev
->name
, adj_dev
->name
, adj
->ref_nr
);
7933 adj
= kmalloc(sizeof(*adj
), GFP_KERNEL
);
7938 adj
->master
= master
;
7940 adj
->private = private;
7941 adj
->ignore
= false;
7944 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7945 dev
->name
, adj_dev
->name
, adj
->ref_nr
, adj_dev
->name
);
7947 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
)) {
7948 ret
= netdev_adjacent_sysfs_add(dev
, adj_dev
, dev_list
);
7953 /* Ensure that master link is always the first item in list. */
7955 ret
= sysfs_create_link(&(dev
->dev
.kobj
),
7956 &(adj_dev
->dev
.kobj
), "master");
7958 goto remove_symlinks
;
7960 list_add_rcu(&adj
->list
, dev_list
);
7962 list_add_tail_rcu(&adj
->list
, dev_list
);
7968 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
7969 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
7977 static void __netdev_adjacent_dev_remove(struct net_device
*dev
,
7978 struct net_device
*adj_dev
,
7980 struct list_head
*dev_list
)
7982 struct netdev_adjacent
*adj
;
7984 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7985 dev
->name
, adj_dev
->name
, ref_nr
);
7987 adj
= __netdev_find_adj(adj_dev
, dev_list
);
7990 pr_err("Adjacency does not exist for device %s from %s\n",
7991 dev
->name
, adj_dev
->name
);
7996 if (adj
->ref_nr
> ref_nr
) {
7997 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7998 dev
->name
, adj_dev
->name
, ref_nr
,
7999 adj
->ref_nr
- ref_nr
);
8000 adj
->ref_nr
-= ref_nr
;
8005 sysfs_remove_link(&(dev
->dev
.kobj
), "master");
8007 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
8008 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
8010 list_del_rcu(&adj
->list
);
8011 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
8012 adj_dev
->name
, dev
->name
, adj_dev
->name
);
8014 kfree_rcu(adj
, rcu
);
8017 static int __netdev_adjacent_dev_link_lists(struct net_device
*dev
,
8018 struct net_device
*upper_dev
,
8019 struct list_head
*up_list
,
8020 struct list_head
*down_list
,
8021 void *private, bool master
)
8025 ret
= __netdev_adjacent_dev_insert(dev
, upper_dev
, up_list
,
8030 ret
= __netdev_adjacent_dev_insert(upper_dev
, dev
, down_list
,
8033 __netdev_adjacent_dev_remove(dev
, upper_dev
, 1, up_list
);
8040 static void __netdev_adjacent_dev_unlink_lists(struct net_device
*dev
,
8041 struct net_device
*upper_dev
,
8043 struct list_head
*up_list
,
8044 struct list_head
*down_list
)
8046 __netdev_adjacent_dev_remove(dev
, upper_dev
, ref_nr
, up_list
);
8047 __netdev_adjacent_dev_remove(upper_dev
, dev
, ref_nr
, down_list
);
8050 static int __netdev_adjacent_dev_link_neighbour(struct net_device
*dev
,
8051 struct net_device
*upper_dev
,
8052 void *private, bool master
)
8054 return __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
8055 &dev
->adj_list
.upper
,
8056 &upper_dev
->adj_list
.lower
,
8060 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device
*dev
,
8061 struct net_device
*upper_dev
)
8063 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
, 1,
8064 &dev
->adj_list
.upper
,
8065 &upper_dev
->adj_list
.lower
);
8068 static int __netdev_upper_dev_link(struct net_device
*dev
,
8069 struct net_device
*upper_dev
, bool master
,
8070 void *upper_priv
, void *upper_info
,
8071 struct netdev_nested_priv
*priv
,
8072 struct netlink_ext_ack
*extack
)
8074 struct netdev_notifier_changeupper_info changeupper_info
= {
8079 .upper_dev
= upper_dev
,
8082 .upper_info
= upper_info
,
8084 struct net_device
*master_dev
;
8089 if (dev
== upper_dev
)
8092 /* To prevent loops, check if dev is not upper device to upper_dev. */
8093 if (__netdev_has_upper_dev(upper_dev
, dev
))
8096 if ((dev
->lower_level
+ upper_dev
->upper_level
) > MAX_NEST_DEV
)
8100 if (__netdev_has_upper_dev(dev
, upper_dev
))
8103 master_dev
= __netdev_master_upper_dev_get(dev
);
8105 return master_dev
== upper_dev
? -EEXIST
: -EBUSY
;
8108 ret
= call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
8109 &changeupper_info
.info
);
8110 ret
= notifier_to_errno(ret
);
8114 ret
= __netdev_adjacent_dev_link_neighbour(dev
, upper_dev
, upper_priv
,
8119 ret
= call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
8120 &changeupper_info
.info
);
8121 ret
= notifier_to_errno(ret
);
8125 __netdev_update_upper_level(dev
, NULL
);
8126 __netdev_walk_all_lower_dev(dev
, __netdev_update_upper_level
, NULL
);
8128 __netdev_update_lower_level(upper_dev
, priv
);
8129 __netdev_walk_all_upper_dev(upper_dev
, __netdev_update_lower_level
,
8135 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
8141 * netdev_upper_dev_link - Add a link to the upper device
8143 * @upper_dev: new upper device
8144 * @extack: netlink extended ack
8146 * Adds a link to device which is upper to this one. The caller must hold
8147 * the RTNL lock. On a failure a negative errno code is returned.
8148 * On success the reference counts are adjusted and the function
8151 int netdev_upper_dev_link(struct net_device
*dev
,
8152 struct net_device
*upper_dev
,
8153 struct netlink_ext_ack
*extack
)
8155 struct netdev_nested_priv priv
= {
8156 .flags
= NESTED_SYNC_IMM
| NESTED_SYNC_TODO
,
8160 return __netdev_upper_dev_link(dev
, upper_dev
, false,
8161 NULL
, NULL
, &priv
, extack
);
8163 EXPORT_SYMBOL(netdev_upper_dev_link
);
8166 * netdev_master_upper_dev_link - Add a master link to the upper device
8168 * @upper_dev: new upper device
8169 * @upper_priv: upper device private
8170 * @upper_info: upper info to be passed down via notifier
8171 * @extack: netlink extended ack
8173 * Adds a link to device which is upper to this one. In this case, only
8174 * one master upper device can be linked, although other non-master devices
8175 * might be linked as well. The caller must hold the RTNL lock.
8176 * On a failure a negative errno code is returned. On success the reference
8177 * counts are adjusted and the function returns zero.
8179 int netdev_master_upper_dev_link(struct net_device
*dev
,
8180 struct net_device
*upper_dev
,
8181 void *upper_priv
, void *upper_info
,
8182 struct netlink_ext_ack
*extack
)
8184 struct netdev_nested_priv priv
= {
8185 .flags
= NESTED_SYNC_IMM
| NESTED_SYNC_TODO
,
8189 return __netdev_upper_dev_link(dev
, upper_dev
, true,
8190 upper_priv
, upper_info
, &priv
, extack
);
8192 EXPORT_SYMBOL(netdev_master_upper_dev_link
);
8194 static void __netdev_upper_dev_unlink(struct net_device
*dev
,
8195 struct net_device
*upper_dev
,
8196 struct netdev_nested_priv
*priv
)
8198 struct netdev_notifier_changeupper_info changeupper_info
= {
8202 .upper_dev
= upper_dev
,
8208 changeupper_info
.master
= netdev_master_upper_dev_get(dev
) == upper_dev
;
8210 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
8211 &changeupper_info
.info
);
8213 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
8215 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
8216 &changeupper_info
.info
);
8218 __netdev_update_upper_level(dev
, NULL
);
8219 __netdev_walk_all_lower_dev(dev
, __netdev_update_upper_level
, NULL
);
8221 __netdev_update_lower_level(upper_dev
, priv
);
8222 __netdev_walk_all_upper_dev(upper_dev
, __netdev_update_lower_level
,
8227 * netdev_upper_dev_unlink - Removes a link to upper device
8229 * @upper_dev: new upper device
8231 * Removes a link to device which is upper to this one. The caller must hold
8234 void netdev_upper_dev_unlink(struct net_device
*dev
,
8235 struct net_device
*upper_dev
)
8237 struct netdev_nested_priv priv
= {
8238 .flags
= NESTED_SYNC_TODO
,
8242 __netdev_upper_dev_unlink(dev
, upper_dev
, &priv
);
8244 EXPORT_SYMBOL(netdev_upper_dev_unlink
);
8246 static void __netdev_adjacent_dev_set(struct net_device
*upper_dev
,
8247 struct net_device
*lower_dev
,
8250 struct netdev_adjacent
*adj
;
8252 adj
= __netdev_find_adj(lower_dev
, &upper_dev
->adj_list
.lower
);
8256 adj
= __netdev_find_adj(upper_dev
, &lower_dev
->adj_list
.upper
);
8261 static void netdev_adjacent_dev_disable(struct net_device
*upper_dev
,
8262 struct net_device
*lower_dev
)
8264 __netdev_adjacent_dev_set(upper_dev
, lower_dev
, true);
8267 static void netdev_adjacent_dev_enable(struct net_device
*upper_dev
,
8268 struct net_device
*lower_dev
)
8270 __netdev_adjacent_dev_set(upper_dev
, lower_dev
, false);
8273 int netdev_adjacent_change_prepare(struct net_device
*old_dev
,
8274 struct net_device
*new_dev
,
8275 struct net_device
*dev
,
8276 struct netlink_ext_ack
*extack
)
8278 struct netdev_nested_priv priv
= {
8287 if (old_dev
&& new_dev
!= old_dev
)
8288 netdev_adjacent_dev_disable(dev
, old_dev
);
8289 err
= __netdev_upper_dev_link(new_dev
, dev
, false, NULL
, NULL
, &priv
,
8292 if (old_dev
&& new_dev
!= old_dev
)
8293 netdev_adjacent_dev_enable(dev
, old_dev
);
8299 EXPORT_SYMBOL(netdev_adjacent_change_prepare
);
8301 void netdev_adjacent_change_commit(struct net_device
*old_dev
,
8302 struct net_device
*new_dev
,
8303 struct net_device
*dev
)
8305 struct netdev_nested_priv priv
= {
8306 .flags
= NESTED_SYNC_IMM
| NESTED_SYNC_TODO
,
8310 if (!new_dev
|| !old_dev
)
8313 if (new_dev
== old_dev
)
8316 netdev_adjacent_dev_enable(dev
, old_dev
);
8317 __netdev_upper_dev_unlink(old_dev
, dev
, &priv
);
8319 EXPORT_SYMBOL(netdev_adjacent_change_commit
);
8321 void netdev_adjacent_change_abort(struct net_device
*old_dev
,
8322 struct net_device
*new_dev
,
8323 struct net_device
*dev
)
8325 struct netdev_nested_priv priv
= {
8333 if (old_dev
&& new_dev
!= old_dev
)
8334 netdev_adjacent_dev_enable(dev
, old_dev
);
8336 __netdev_upper_dev_unlink(new_dev
, dev
, &priv
);
8338 EXPORT_SYMBOL(netdev_adjacent_change_abort
);
8341 * netdev_bonding_info_change - Dispatch event about slave change
8343 * @bonding_info: info to dispatch
8345 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8346 * The caller must hold the RTNL lock.
8348 void netdev_bonding_info_change(struct net_device
*dev
,
8349 struct netdev_bonding_info
*bonding_info
)
8351 struct netdev_notifier_bonding_info info
= {
8355 memcpy(&info
.bonding_info
, bonding_info
,
8356 sizeof(struct netdev_bonding_info
));
8357 call_netdevice_notifiers_info(NETDEV_BONDING_INFO
,
8360 EXPORT_SYMBOL(netdev_bonding_info_change
);
8363 * netdev_get_xmit_slave - Get the xmit slave of master device
8366 * @all_slaves: assume all the slaves are active
8368 * The reference counters are not incremented so the caller must be
8369 * careful with locks. The caller must hold RCU lock.
8370 * %NULL is returned if no slave is found.
8373 struct net_device
*netdev_get_xmit_slave(struct net_device
*dev
,
8374 struct sk_buff
*skb
,
8377 const struct net_device_ops
*ops
= dev
->netdev_ops
;
8379 if (!ops
->ndo_get_xmit_slave
)
8381 return ops
->ndo_get_xmit_slave(dev
, skb
, all_slaves
);
8383 EXPORT_SYMBOL(netdev_get_xmit_slave
);
8385 static struct net_device
*netdev_sk_get_lower_dev(struct net_device
*dev
,
8388 const struct net_device_ops
*ops
= dev
->netdev_ops
;
8390 if (!ops
->ndo_sk_get_lower_dev
)
8392 return ops
->ndo_sk_get_lower_dev(dev
, sk
);
8396 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8400 * %NULL is returned if no lower device is found.
8403 struct net_device
*netdev_sk_get_lowest_dev(struct net_device
*dev
,
8406 struct net_device
*lower
;
8408 lower
= netdev_sk_get_lower_dev(dev
, sk
);
8411 lower
= netdev_sk_get_lower_dev(dev
, sk
);
8416 EXPORT_SYMBOL(netdev_sk_get_lowest_dev
);
8418 static void netdev_adjacent_add_links(struct net_device
*dev
)
8420 struct netdev_adjacent
*iter
;
8422 struct net
*net
= dev_net(dev
);
8424 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8425 if (!net_eq(net
, dev_net(iter
->dev
)))
8427 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8428 &iter
->dev
->adj_list
.lower
);
8429 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
8430 &dev
->adj_list
.upper
);
8433 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8434 if (!net_eq(net
, dev_net(iter
->dev
)))
8436 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8437 &iter
->dev
->adj_list
.upper
);
8438 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
8439 &dev
->adj_list
.lower
);
8443 static void netdev_adjacent_del_links(struct net_device
*dev
)
8445 struct netdev_adjacent
*iter
;
8447 struct net
*net
= dev_net(dev
);
8449 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8450 if (!net_eq(net
, dev_net(iter
->dev
)))
8452 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
8453 &iter
->dev
->adj_list
.lower
);
8454 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
8455 &dev
->adj_list
.upper
);
8458 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8459 if (!net_eq(net
, dev_net(iter
->dev
)))
8461 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
8462 &iter
->dev
->adj_list
.upper
);
8463 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
8464 &dev
->adj_list
.lower
);
8468 void netdev_adjacent_rename_links(struct net_device
*dev
, char *oldname
)
8470 struct netdev_adjacent
*iter
;
8472 struct net
*net
= dev_net(dev
);
8474 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8475 if (!net_eq(net
, dev_net(iter
->dev
)))
8477 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
8478 &iter
->dev
->adj_list
.lower
);
8479 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8480 &iter
->dev
->adj_list
.lower
);
8483 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8484 if (!net_eq(net
, dev_net(iter
->dev
)))
8486 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
8487 &iter
->dev
->adj_list
.upper
);
8488 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8489 &iter
->dev
->adj_list
.upper
);
8493 void *netdev_lower_dev_get_private(struct net_device
*dev
,
8494 struct net_device
*lower_dev
)
8496 struct netdev_adjacent
*lower
;
8500 lower
= __netdev_find_adj(lower_dev
, &dev
->adj_list
.lower
);
8504 return lower
->private;
8506 EXPORT_SYMBOL(netdev_lower_dev_get_private
);
8510 * netdev_lower_state_changed - Dispatch event about lower device state change
8511 * @lower_dev: device
8512 * @lower_state_info: state to dispatch
8514 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8515 * The caller must hold the RTNL lock.
8517 void netdev_lower_state_changed(struct net_device
*lower_dev
,
8518 void *lower_state_info
)
8520 struct netdev_notifier_changelowerstate_info changelowerstate_info
= {
8521 .info
.dev
= lower_dev
,
8525 changelowerstate_info
.lower_state_info
= lower_state_info
;
8526 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE
,
8527 &changelowerstate_info
.info
);
8529 EXPORT_SYMBOL(netdev_lower_state_changed
);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}
static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(audit_context(), GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
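/* Illustrative sketch, not part of the original file: because promiscuity is
 * reference counted, each user adds one on entry and removes one on exit; the
 * interface only leaves promiscuous mode when the last user drops its
 * reference. The helper names are assumptions; RTNL must be held.
 */
static inline int example_promisc_get(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);
}

static inline void example_promisc_put(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);
}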
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all interfaces. Once it hits zero the device reverts back to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
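/* Illustrative sketch, not part of the original file: allmulti uses the same
 * counting scheme as promiscuity, so a subsystem that needs every multicast
 * frame bumps the counter while it runs and drops it afterwards. The helper
 * name is an assumption; RTNL must be held.
 */
static inline int example_want_all_multicast(struct net_device *dev, bool on)
{
	return dev_set_allmulti(dev, on ? 1 : -1);
}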
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
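/* Illustrative sketch, not part of the original file: the condition below
 * mirrors the fallback used by __dev_set_rx_mode() above - a device without
 * IFF_UNICAST_FLT cannot filter secondary unicast addresses in hardware, so
 * it is transparently switched to promiscuous mode while such addresses
 * exist. The helper name is an assumption.
 */
static inline bool example_needs_uc_promisc(struct net_device *dev)
{
	return !(dev->priv_flags & IFF_UNICAST_FLT) && !netdev_uc_empty(dev);
}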
/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
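/* Illustrative sketch, not part of the original file: dev_get_flags() is the
 * view userspace sees via SIOCGIFFLAGS/rtnetlink, so IFF_RUNNING here tracks
 * the operational state rather than a raw dev->flags bit. The helper name is
 * an assumption.
 */
static inline bool example_oper_running(const struct net_device *dev)
{
	return dev_get_flags(dev) & IFF_RUNNING;
}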
8735 int __dev_change_flags(struct net_device
*dev
, unsigned int flags
,
8736 struct netlink_ext_ack
*extack
)
8738 unsigned int old_flags
= dev
->flags
;
8744 * Set the flags on our device.
8747 dev
->flags
= (flags
& (IFF_DEBUG
| IFF_NOTRAILERS
| IFF_NOARP
|
8748 IFF_DYNAMIC
| IFF_MULTICAST
| IFF_PORTSEL
|
8750 (dev
->flags
& (IFF_UP
| IFF_VOLATILE
| IFF_PROMISC
|
8754 * Load in the correct multicast list now the flags have changed.
8757 if ((old_flags
^ flags
) & IFF_MULTICAST
)
8758 dev_change_rx_flags(dev
, IFF_MULTICAST
);
8760 dev_set_rx_mode(dev
);
8763 * Have we downed the interface. We handle IFF_UP ourselves
8764 * according to user attempts to set it, rather than blindly
8769 if ((old_flags
^ flags
) & IFF_UP
) {
8770 if (old_flags
& IFF_UP
)
8773 ret
= __dev_open(dev
, extack
);
8776 if ((flags
^ dev
->gflags
) & IFF_PROMISC
) {
8777 int inc
= (flags
& IFF_PROMISC
) ? 1 : -1;
8778 unsigned int old_flags
= dev
->flags
;
8780 dev
->gflags
^= IFF_PROMISC
;
8782 if (__dev_set_promiscuity(dev
, inc
, false) >= 0)
8783 if (dev
->flags
!= old_flags
)
8784 dev_set_rx_mode(dev
);
8787 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8788 * is important. Some (broken) drivers set IFF_PROMISC, when
8789 * IFF_ALLMULTI is requested not asking us and not reporting.
8791 if ((flags
^ dev
->gflags
) & IFF_ALLMULTI
) {
8792 int inc
= (flags
& IFF_ALLMULTI
) ? 1 : -1;
8794 dev
->gflags
^= IFF_ALLMULTI
;
8795 __dev_set_allmulti(dev
, inc
, false);
8801 void __dev_notify_flags(struct net_device
*dev
, unsigned int old_flags
,
8802 unsigned int gchanges
)
8804 unsigned int changes
= dev
->flags
^ old_flags
;
8807 rtmsg_ifinfo(RTM_NEWLINK
, dev
, gchanges
, GFP_ATOMIC
);
8809 if (changes
& IFF_UP
) {
8810 if (dev
->flags
& IFF_UP
)
8811 call_netdevice_notifiers(NETDEV_UP
, dev
);
8813 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
8816 if (dev
->flags
& IFF_UP
&&
8817 (changes
& ~(IFF_UP
| IFF_PROMISC
| IFF_ALLMULTI
| IFF_VOLATILE
))) {
8818 struct netdev_notifier_change_info change_info
= {
8822 .flags_changed
= changes
,
8825 call_netdevice_notifiers_info(NETDEV_CHANGE
, &change_info
.info
);
8830 * dev_change_flags - change device settings
8832 * @flags: device state flags
8833 * @extack: netlink extended ack
8835 * Change settings on device based state flags. The flags are
8836 * in the userspace exported format.
8838 int dev_change_flags(struct net_device
*dev
, unsigned int flags
,
8839 struct netlink_ext_ack
*extack
)
8842 unsigned int changes
, old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
8844 ret
= __dev_change_flags(dev
, flags
, extack
);
8848 changes
= (old_flags
^ dev
->flags
) | (old_gflags
^ dev
->gflags
);
8849 __dev_notify_flags(dev
, old_flags
, changes
);
8852 EXPORT_SYMBOL(dev_change_flags
);
int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	/* Pairs with all the lockless reads of dev->mtu in the stack */
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

int dev_validate_mtu(struct net_device *dev, int new_mtu,
		     struct netlink_ext_ack *extack)
{
	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}
	return 0;
}
/**
 *	dev_set_mtu_ext - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *	@extack: netlink extended ack
 *
 *	Change the maximum transfer size of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	err = dev_validate_mtu(dev, new_mtu, extack);
	if (err)
		return err;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						   orig_mtu);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						     new_mtu);
		}
	}
	return err;
}
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	struct netlink_ext_ack extack;
	int err;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
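/* Illustrative sketch, not part of the original file: raising the MTU under
 * RTNL while respecting the device's advertised maximum, as dev_validate_mtu()
 * above would otherwise reject the request. The helper name and the
 * 9000-byte value are assumptions.
 */
static inline int example_try_jumbo_mtu(struct net_device *dev)
{
	int new_mtu = 9000;

	if (dev->max_mtu && new_mtu > dev->max_mtu)
		new_mtu = dev->max_mtu;	/* clamp to what the device allows */

	return dev_set_mtu(dev, new_mtu);
}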
8944 * dev_change_tx_queue_len - Change TX queue length of a netdevice
8946 * @new_len: new tx queue length
8948 int dev_change_tx_queue_len(struct net_device
*dev
, unsigned long new_len
)
8950 unsigned int orig_len
= dev
->tx_queue_len
;
8953 if (new_len
!= (unsigned int)new_len
)
8956 if (new_len
!= orig_len
) {
8957 dev
->tx_queue_len
= new_len
;
8958 res
= call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN
, dev
);
8959 res
= notifier_to_errno(res
);
8962 res
= dev_qdisc_change_tx_queue_len(dev
);
8970 netdev_err(dev
, "refused to change device tx_queue_len\n");
8971 dev
->tx_queue_len
= orig_len
;
8976 * dev_set_group - Change group this device belongs to
8978 * @new_group: group this device should belong to
8980 void dev_set_group(struct net_device
*dev
, int new_group
)
8982 dev
->group
= new_group
;
8984 EXPORT_SYMBOL(dev_set_group
);
8987 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8989 * @addr: new address
8990 * @extack: netlink extended ack
8992 int dev_pre_changeaddr_notify(struct net_device
*dev
, const char *addr
,
8993 struct netlink_ext_ack
*extack
)
8995 struct netdev_notifier_pre_changeaddr_info info
= {
8997 .info
.extack
= extack
,
9002 rc
= call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR
, &info
.info
);
9003 return notifier_to_errno(rc
);
9005 EXPORT_SYMBOL(dev_pre_changeaddr_notify
);
9008 * dev_set_mac_address - Change Media Access Control Address
9011 * @extack: netlink extended ack
9013 * Change the hardware (MAC) address of the device
9015 int dev_set_mac_address(struct net_device
*dev
, struct sockaddr
*sa
,
9016 struct netlink_ext_ack
*extack
)
9018 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9021 if (!ops
->ndo_set_mac_address
)
9023 if (sa
->sa_family
!= dev
->type
)
9025 if (!netif_device_present(dev
))
9027 err
= dev_pre_changeaddr_notify(dev
, sa
->sa_data
, extack
);
9030 err
= ops
->ndo_set_mac_address(dev
, sa
);
9033 dev
->addr_assign_type
= NET_ADDR_SET
;
9034 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
9035 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
9038 EXPORT_SYMBOL(dev_set_mac_address
);
9040 static DECLARE_RWSEM(dev_addr_sem
);
9042 int dev_set_mac_address_user(struct net_device
*dev
, struct sockaddr
*sa
,
9043 struct netlink_ext_ack
*extack
)
9047 down_write(&dev_addr_sem
);
9048 ret
= dev_set_mac_address(dev
, sa
, extack
);
9049 up_write(&dev_addr_sem
);
9052 EXPORT_SYMBOL(dev_set_mac_address_user
);
9054 int dev_get_mac_address(struct sockaddr
*sa
, struct net
*net
, char *dev_name
)
9056 size_t size
= sizeof(sa
->sa_data
);
9057 struct net_device
*dev
;
9060 down_read(&dev_addr_sem
);
9063 dev
= dev_get_by_name_rcu(net
, dev_name
);
9069 memset(sa
->sa_data
, 0, size
);
9071 memcpy(sa
->sa_data
, dev
->dev_addr
,
9072 min_t(size_t, size
, dev
->addr_len
));
9073 sa
->sa_family
= dev
->type
;
9077 up_read(&dev_addr_sem
);
9080 EXPORT_SYMBOL(dev_get_mac_address
);
9083 * dev_change_carrier - Change device carrier
9085 * @new_carrier: new value
9087 * Change device carrier
9089 int dev_change_carrier(struct net_device
*dev
, bool new_carrier
)
9091 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9093 if (!ops
->ndo_change_carrier
)
9095 if (!netif_device_present(dev
))
9097 return ops
->ndo_change_carrier(dev
, new_carrier
);
9099 EXPORT_SYMBOL(dev_change_carrier
);
9102 * dev_get_phys_port_id - Get device physical port ID
9106 * Get device physical port ID
9108 int dev_get_phys_port_id(struct net_device
*dev
,
9109 struct netdev_phys_item_id
*ppid
)
9111 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9113 if (!ops
->ndo_get_phys_port_id
)
9115 return ops
->ndo_get_phys_port_id(dev
, ppid
);
9117 EXPORT_SYMBOL(dev_get_phys_port_id
);
9120 * dev_get_phys_port_name - Get device physical port name
9123 * @len: limit of bytes to copy to name
9125 * Get device physical port name
9127 int dev_get_phys_port_name(struct net_device
*dev
,
9128 char *name
, size_t len
)
9130 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9133 if (ops
->ndo_get_phys_port_name
) {
9134 err
= ops
->ndo_get_phys_port_name(dev
, name
, len
);
9135 if (err
!= -EOPNOTSUPP
)
9138 return devlink_compat_phys_port_name_get(dev
, name
, len
);
9140 EXPORT_SYMBOL(dev_get_phys_port_name
);
9143 * dev_get_port_parent_id - Get the device's port parent identifier
9144 * @dev: network device
9145 * @ppid: pointer to a storage for the port's parent identifier
9146 * @recurse: allow/disallow recursion to lower devices
9148 * Get the devices's port parent identifier
9150 int dev_get_port_parent_id(struct net_device
*dev
,
9151 struct netdev_phys_item_id
*ppid
,
9154 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9155 struct netdev_phys_item_id first
= { };
9156 struct net_device
*lower_dev
;
9157 struct list_head
*iter
;
9160 if (ops
->ndo_get_port_parent_id
) {
9161 err
= ops
->ndo_get_port_parent_id(dev
, ppid
);
9162 if (err
!= -EOPNOTSUPP
)
9166 err
= devlink_compat_switch_id_get(dev
, ppid
);
9167 if (!err
|| err
!= -EOPNOTSUPP
)
9173 netdev_for_each_lower_dev(dev
, lower_dev
, iter
) {
9174 err
= dev_get_port_parent_id(lower_dev
, ppid
, recurse
);
9179 else if (memcmp(&first
, ppid
, sizeof(*ppid
)))
9185 EXPORT_SYMBOL(dev_get_port_parent_id
);
9188 * netdev_port_same_parent_id - Indicate if two network devices have
9189 * the same port parent identifier
9190 * @a: first network device
9191 * @b: second network device
9193 bool netdev_port_same_parent_id(struct net_device
*a
, struct net_device
*b
)
9195 struct netdev_phys_item_id a_id
= { };
9196 struct netdev_phys_item_id b_id
= { };
9198 if (dev_get_port_parent_id(a
, &a_id
, true) ||
9199 dev_get_port_parent_id(b
, &b_id
, true))
9202 return netdev_phys_item_id_same(&a_id
, &b_id
);
9204 EXPORT_SYMBOL(netdev_port_same_parent_id
);
9207 * dev_change_proto_down - update protocol port state information
9209 * @proto_down: new value
9211 * This info can be used by switch drivers to set the phys state of the
9214 int dev_change_proto_down(struct net_device
*dev
, bool proto_down
)
9216 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9218 if (!ops
->ndo_change_proto_down
)
9220 if (!netif_device_present(dev
))
9222 return ops
->ndo_change_proto_down(dev
, proto_down
);
9224 EXPORT_SYMBOL(dev_change_proto_down
);
9227 * dev_change_proto_down_generic - generic implementation for
9228 * ndo_change_proto_down that sets carrier according to
9232 * @proto_down: new value
9234 int dev_change_proto_down_generic(struct net_device
*dev
, bool proto_down
)
9237 netif_carrier_off(dev
);
9239 netif_carrier_on(dev
);
9240 dev
->proto_down
= proto_down
;
9243 EXPORT_SYMBOL(dev_change_proto_down_generic
);
9246 * dev_change_proto_down_reason - proto down reason
9249 * @mask: proto down mask
9250 * @value: proto down value
9252 void dev_change_proto_down_reason(struct net_device
*dev
, unsigned long mask
,
9258 dev
->proto_down_reason
= value
;
9260 for_each_set_bit(b
, &mask
, 32) {
9261 if (value
& (1 << b
))
9262 dev
->proto_down_reason
|= BIT(b
);
9264 dev
->proto_down_reason
&= ~BIT(b
);
9268 EXPORT_SYMBOL(dev_change_proto_down_reason
);
struct bpf_xdp_link {
	struct bpf_link link;
	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
	int flags;
};

static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{
	if (flags & XDP_FLAGS_HW_MODE)
		return XDP_MODE_HW;
	if (flags & XDP_FLAGS_DRV_MODE)
		return XDP_MODE_DRV;
	if (flags & XDP_FLAGS_SKB_MODE)
		return XDP_MODE_SKB;
	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
}

static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
{
	switch (mode) {
	case XDP_MODE_SKB:
		return generic_xdp_install;
	case XDP_MODE_DRV:
	case XDP_MODE_HW:
		return dev->netdev_ops->ndo_bpf;
	default:
		return NULL;
	}
}
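/* Illustrative sketch, not part of the original file: the mode resolution
 * above means a request with no XDP_FLAGS_*_MODE bit set attaches in native
 * (driver) mode when the driver implements ndo_bpf, and otherwise falls back
 * to the generic (skb) path. The helper name is an assumption.
 */
static inline bool example_supports_native_xdp(const struct net_device *dev)
{
	return dev->netdev_ops->ndo_bpf != NULL;
}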
static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
					 enum bpf_xdp_mode mode)
{
	return dev->xdp_state[mode].link;
}

static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
				     enum bpf_xdp_mode mode)
{
	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);

	if (link)
		return link->link.prog;
	return dev->xdp_state[mode].prog;
}

u8 dev_xdp_prog_count(struct net_device *dev)
{
	u8 count = 0;
	int i;

	for (i = 0; i < __MAX_XDP_MODE; i++)
		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
			count++;
	return count;
}
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);

u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
	struct bpf_prog *prog = dev_xdp_prog(dev, mode);

	return prog ? prog->aux->id : 0;
}
9335 static void dev_xdp_set_link(struct net_device
*dev
, enum bpf_xdp_mode mode
,
9336 struct bpf_xdp_link
*link
)
9338 dev
->xdp_state
[mode
].link
= link
;
9339 dev
->xdp_state
[mode
].prog
= NULL
;
9342 static void dev_xdp_set_prog(struct net_device
*dev
, enum bpf_xdp_mode mode
,
9343 struct bpf_prog
*prog
)
9345 dev
->xdp_state
[mode
].link
= NULL
;
9346 dev
->xdp_state
[mode
].prog
= prog
;
9349 static int dev_xdp_install(struct net_device
*dev
, enum bpf_xdp_mode mode
,
9350 bpf_op_t bpf_op
, struct netlink_ext_ack
*extack
,
9351 u32 flags
, struct bpf_prog
*prog
)
9353 struct netdev_bpf xdp
;
9356 memset(&xdp
, 0, sizeof(xdp
));
9357 xdp
.command
= mode
== XDP_MODE_HW
? XDP_SETUP_PROG_HW
: XDP_SETUP_PROG
;
9358 xdp
.extack
= extack
;
9362 /* Drivers assume refcnt is already incremented (i.e, prog pointer is
9363 * "moved" into driver), so they don't increment it on their own, but
9364 * they do decrement refcnt when program is detached or replaced.
9365 * Given net_device also owns link/prog, we need to bump refcnt here
9366 * to prevent drivers from underflowing it.
9370 err
= bpf_op(dev
, &xdp
);
9377 if (mode
!= XDP_MODE_HW
)
9378 bpf_prog_change_xdp(dev_xdp_prog(dev
, mode
), prog
);
9383 static void dev_xdp_uninstall(struct net_device
*dev
)
9385 struct bpf_xdp_link
*link
;
9386 struct bpf_prog
*prog
;
9387 enum bpf_xdp_mode mode
;
9392 for (mode
= XDP_MODE_SKB
; mode
< __MAX_XDP_MODE
; mode
++) {
9393 prog
= dev_xdp_prog(dev
, mode
);
9397 bpf_op
= dev_xdp_bpf_op(dev
, mode
);
9401 WARN_ON(dev_xdp_install(dev
, mode
, bpf_op
, NULL
, 0, NULL
));
9403 /* auto-detach link from net device */
9404 link
= dev_xdp_link(dev
, mode
);
9410 dev_xdp_set_link(dev
, mode
, NULL
);
9414 static int dev_xdp_attach(struct net_device
*dev
, struct netlink_ext_ack
*extack
,
9415 struct bpf_xdp_link
*link
, struct bpf_prog
*new_prog
,
9416 struct bpf_prog
*old_prog
, u32 flags
)
9418 unsigned int num_modes
= hweight32(flags
& XDP_FLAGS_MODES
);
9419 struct bpf_prog
*cur_prog
;
9420 struct net_device
*upper
;
9421 struct list_head
*iter
;
9422 enum bpf_xdp_mode mode
;
9428 /* either link or prog attachment, never both */
9429 if (link
&& (new_prog
|| old_prog
))
9431 /* link supports only XDP mode flags */
9432 if (link
&& (flags
& ~XDP_FLAGS_MODES
)) {
9433 NL_SET_ERR_MSG(extack
, "Invalid XDP flags for BPF link attachment");
9436 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9437 if (num_modes
> 1) {
9438 NL_SET_ERR_MSG(extack
, "Only one XDP mode flag can be set");
9441 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9442 if (!num_modes
&& dev_xdp_prog_count(dev
) > 1) {
9443 NL_SET_ERR_MSG(extack
,
9444 "More than one program loaded, unset mode is ambiguous");
9447 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9448 if (old_prog
&& !(flags
& XDP_FLAGS_REPLACE
)) {
9449 NL_SET_ERR_MSG(extack
, "XDP_FLAGS_REPLACE is not specified");
9453 mode
= dev_xdp_mode(dev
, flags
);
9454 /* can't replace attached link */
9455 if (dev_xdp_link(dev
, mode
)) {
9456 NL_SET_ERR_MSG(extack
, "Can't replace active BPF XDP link");
9460 /* don't allow if an upper device already has a program */
9461 netdev_for_each_upper_dev_rcu(dev
, upper
, iter
) {
9462 if (dev_xdp_prog_count(upper
) > 0) {
9463 NL_SET_ERR_MSG(extack
, "Cannot attach when an upper device already has a program");
9468 cur_prog
= dev_xdp_prog(dev
, mode
);
9469 /* can't replace attached prog with link */
9470 if (link
&& cur_prog
) {
9471 NL_SET_ERR_MSG(extack
, "Can't replace active XDP program with BPF link");
9474 if ((flags
& XDP_FLAGS_REPLACE
) && cur_prog
!= old_prog
) {
9475 NL_SET_ERR_MSG(extack
, "Active program does not match expected");
9479 /* put effective new program into new_prog */
9481 new_prog
= link
->link
.prog
;
9484 bool offload
= mode
== XDP_MODE_HW
;
9485 enum bpf_xdp_mode other_mode
= mode
== XDP_MODE_SKB
9486 ? XDP_MODE_DRV
: XDP_MODE_SKB
;
9488 if ((flags
& XDP_FLAGS_UPDATE_IF_NOEXIST
) && cur_prog
) {
9489 NL_SET_ERR_MSG(extack
, "XDP program already attached");
9492 if (!offload
&& dev_xdp_prog(dev
, other_mode
)) {
9493 NL_SET_ERR_MSG(extack
, "Native and generic XDP can't be active at the same time");
9496 if (!offload
&& bpf_prog_is_dev_bound(new_prog
->aux
)) {
9497 NL_SET_ERR_MSG(extack
, "Using device-bound program without HW_MODE flag is not supported");
9500 if (new_prog
->expected_attach_type
== BPF_XDP_DEVMAP
) {
9501 NL_SET_ERR_MSG(extack
, "BPF_XDP_DEVMAP programs can not be attached to a device");
9504 if (new_prog
->expected_attach_type
== BPF_XDP_CPUMAP
) {
9505 NL_SET_ERR_MSG(extack
, "BPF_XDP_CPUMAP programs can not be attached to a device");
9510 /* don't call drivers if the effective program didn't change */
9511 if (new_prog
!= cur_prog
) {
9512 bpf_op
= dev_xdp_bpf_op(dev
, mode
);
9514 NL_SET_ERR_MSG(extack
, "Underlying driver does not support XDP in native mode");
9518 err
= dev_xdp_install(dev
, mode
, bpf_op
, extack
, flags
, new_prog
);
9524 dev_xdp_set_link(dev
, mode
, link
);
9526 dev_xdp_set_prog(dev
, mode
, new_prog
);
9528 bpf_prog_put(cur_prog
);
9533 static int dev_xdp_attach_link(struct net_device
*dev
,
9534 struct netlink_ext_ack
*extack
,
9535 struct bpf_xdp_link
*link
)
9537 return dev_xdp_attach(dev
, extack
, link
, NULL
, NULL
, link
->flags
);
9540 static int dev_xdp_detach_link(struct net_device
*dev
,
9541 struct netlink_ext_ack
*extack
,
9542 struct bpf_xdp_link
*link
)
9544 enum bpf_xdp_mode mode
;
9549 mode
= dev_xdp_mode(dev
, link
->flags
);
9550 if (dev_xdp_link(dev
, mode
) != link
)
9553 bpf_op
= dev_xdp_bpf_op(dev
, mode
);
9554 WARN_ON(dev_xdp_install(dev
, mode
, bpf_op
, NULL
, 0, NULL
));
9555 dev_xdp_set_link(dev
, mode
, NULL
);
static void bpf_xdp_link_release(struct bpf_link *link)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);

	rtnl_lock();

	/* if racing with net_device's tear down, xdp_link->dev might be
	 * already NULL, in which case link was already auto-detached
	 */
	if (xdp_link->dev) {
		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
		xdp_link->dev = NULL;
	}

	rtnl_unlock();
}

static int bpf_xdp_link_detach(struct bpf_link *link)
{
	bpf_xdp_link_release(link);
	return 0;
}

static void bpf_xdp_link_dealloc(struct bpf_link *link)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);

	kfree(xdp_link);
}
static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
				     struct seq_file *seq)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	u32 ifindex = 0;

	rtnl_lock();
	if (xdp_link->dev)
		ifindex = xdp_link->dev->ifindex;
	rtnl_unlock();

	seq_printf(seq, "ifindex:\t%u\n", ifindex);
}

static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
				       struct bpf_link_info *info)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	u32 ifindex = 0;

	rtnl_lock();
	if (xdp_link->dev)
		ifindex = xdp_link->dev->ifindex;
	rtnl_unlock();

	info->xdp.ifindex = ifindex;
	return 0;
}
static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
			       struct bpf_prog *old_prog)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;
	int err = 0;

	rtnl_lock();

	/* link might have been auto-released already, so fail */
	if (!xdp_link->dev) {
		err = -ENOLINK;
		goto out_unlock;
	}

	if (old_prog && link->prog != old_prog) {
		err = -EPERM;
		goto out_unlock;
	}
	old_prog = link->prog;
	if (old_prog->type != new_prog->type ||
	    old_prog->expected_attach_type != new_prog->expected_attach_type) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (old_prog == new_prog) {
		/* no-op, don't disturb drivers */
		bpf_prog_put(new_prog);
		goto out_unlock;
	}

	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
			      xdp_link->flags, new_prog);
	if (err)
		goto out_unlock;

	old_prog = xchg(&link->prog, new_prog);
	bpf_prog_put(old_prog);

out_unlock:
	rtnl_unlock();
	return err;
}

static const struct bpf_link_ops bpf_xdp_link_lops = {
	.release = bpf_xdp_link_release,
	.dealloc = bpf_xdp_link_dealloc,
	.detach = bpf_xdp_link_detach,
	.show_fdinfo = bpf_xdp_link_show_fdinfo,
	.fill_link_info = bpf_xdp_link_fill_link_info,
	.update_prog = bpf_xdp_link_update,
};
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_link_primer link_primer;
	struct bpf_xdp_link *link;
	struct net_device *dev;
	int err, fd;

	rtnl_lock();
	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
	if (!dev) {
		rtnl_unlock();
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto unlock;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
	link->dev = dev;
	link->flags = attr->link_create.flags;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto unlock;
	}

	err = dev_xdp_attach_link(dev, NULL, link);
	rtnl_unlock();

	if (err) {
		link->dev = NULL;
		bpf_link_cleanup(&link_primer);
		goto out_put_dev;
	}

	fd = bpf_link_settle(&link_primer);
	/* link itself doesn't hold dev's refcnt to not complicate shutdown */
	dev_put(dev);
	return fd;

unlock:
	rtnl_unlock();

out_put_dev:
	dev_put(dev);
	return err;
}
/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@expected_fd: old program fd that userspace expects to replace or clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags)
{
	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
	int err;

	ASSERT_RTNL();

	if (fd >= 0) {
		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
						 mode != XDP_MODE_SKB);
		if (IS_ERR(new_prog))
			return PTR_ERR(new_prog);
	}

	if (expected_fd >= 0) {
		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
						 mode != XDP_MODE_SKB);
		if (IS_ERR(old_prog)) {
			err = PTR_ERR(old_prog);
			old_prog = NULL;
			goto err_out;
		}
	}

	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);

err_out:
	if (err && new_prog)
		bpf_prog_put(new_prog);
	if (old_prog)
		bpf_prog_put(old_prog);
	return err;
}
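
/*
 * Example (illustrative sketch, not part of this file): how an in-kernel
 * caller holding the rtnl lock might drive dev_change_xdp_fd(). The helper
 * name and its arguments are made up; only dev_change_xdp_fd() and the
 * XDP_FLAGS_* constants are real.
 *
 *	static int example_attach_xdp(struct net_device *dev, int prog_fd,
 *				      int expected_fd,
 *				      struct netlink_ext_ack *extack)
 *	{
 *		u32 flags = XDP_FLAGS_DRV_MODE;
 *
 *		if (expected_fd >= 0)
 *			flags |= XDP_FLAGS_REPLACE;
 *
 *		ASSERT_RTNL();
 *		return dev_change_xdp_fd(dev, extack, prog_fd, expected_fd, flags);
 *	}
 *
 * Passing a negative prog_fd clears the program; XDP_FLAGS_REPLACE makes the
 * kernel verify that the currently attached program matches expected_fd first.
 */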
/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			__netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
			else
				netdev_features_change(lower);
		}
	}
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
					!(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
					 !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware.  If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if (features & NETIF_F_HW_TLS_TX) {
		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		bool hw_csum = features & NETIF_F_HW_CSUM;

		if (!ip_csum && !hw_csum) {
			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
			features &= ~NETIF_F_HW_TLS_TX;
		}
	}

	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
		features &= ~NETIF_F_HW_TLS_RX;
	}

	return features;
}
int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
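
/*
 * Example (illustrative sketch, not part of this file): a hypothetical driver
 * that re-evaluates its feature set after a hardware-dependent condition
 * changes, e.g. after the MTU grows past what its checksum engine handles.
 * example_fix_features(), example_change_mtu() and the 4096-byte limit are
 * invented for the sketch.
 *
 *	static netdev_features_t example_fix_features(struct net_device *dev,
 *						      netdev_features_t features)
 *	{
 *		if (dev->mtu > 4096)
 *			features &= ~NETIF_F_HW_CSUM;
 *		return features;
 *	}
 *
 *	static int example_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		dev->mtu = new_mtu;
 *		netdev_update_features(dev);	// re-runs ndo_fix_features + notifies
 *		return 0;
 *	}
 */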
/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (rootdev->operstate == IF_OPER_TESTING)
		netif_testing_on(dev);
	else
		netif_testing_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
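
/*
 * Example (illustrative sketch, not part of this file): an upper device such
 * as a VLAN- or bonding-style driver typically mirrors the lower device's
 * operstate from its netdevice notifier. example_upper_notifier() and
 * example_get_upper() are invented names.
 *
 *	static int example_upper_notifier(struct notifier_block *nb,
 *					  unsigned long event, void *ptr)
 *	{
 *		struct net_device *lower = netdev_notifier_info_to_dev(ptr);
 *		struct net_device *upper = example_get_upper(lower);	// made-up lookup
 *
 *		if (upper && (event == NETDEV_CHANGE || event == NETDEV_UP))
 *			netif_stacked_transfer_operstate(lower, upper);
 *		return NOTIFY_DONE;
 *	}
 */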
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);
	int err = 0;

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++) {
		rx[i].dev = dev;

		/* XDP RX-queue setup */
		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
		if (err < 0)
			goto err_rxq_info;
	}
	return 0;

err_rxq_info:
	/* Rollback successful reg's and free other resources */
	while (i--)
		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
	kvfree(dev->_rx);
	dev->_rx = NULL;
	return err;
}

static void netif_free_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;

	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
	if (!dev->_rx)
		return;

	for (i = 0; i < count; i++)
		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

	kvfree(dev->_rx);
}
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}
void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
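
/*
 * Example (illustrative sketch, not part of this file): a driver's ndo_stop()
 * usually quiesces transmit before tearing down its rings. example_ndo_stop()
 * and example_teardown_rings() are made-up names.
 *
 *	static int example_ndo_stop(struct net_device *dev)
 *	{
 *		netif_tx_stop_all_queues(dev);
 *		netif_carrier_off(dev);
 *		example_teardown_rings(dev);
 *		return 0;
 *	}
 */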
/**
 * register_netdevice	- register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
		     NETDEV_FEATURE_COUNT);
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	ret = ethtool_check_ops(dev->ethtool_ops);
	if (ret)
		return ret;

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	ret = -ENOMEM;
	dev->name_node = netdev_name_node_head_alloc(dev);
	if (!dev->name_node)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto err_free_name;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->udp_tunnel_nic_info) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret) {
		dev->reg_state = NETREG_UNREGISTERED;
		goto err_uninit;
	}
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* Expect explicit free_netdev() on failure */
		dev->needs_free_netdev = false;
		unregister_netdevice_queue(dev, NULL);
		goto out;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
err_free_name:
	netdev_name_node_free(dev->name_node);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initialize the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note : We dont allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' dont need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
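
/*
 * Example (illustrative sketch, not part of this file): a driver that owns
 * several hardware channels behind one interrupt can hang its NAPI context
 * off a dummy netdev instead of a registered interface. The structure and
 * example_* names are invented; netif_napi_add()/napi_enable() are real.
 *
 *	struct example_hw {
 *		struct net_device napi_dev;	// never registered
 *		struct napi_struct napi;
 *	};
 *
 *	static int example_hw_init(struct example_hw *hw)
 *	{
 *		init_dummy_netdev(&hw->napi_dev);
 *		netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
 *		napi_enable(&hw->napi);
 *		return 0;
 *	}
 */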
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	if (rtnl_lock_killable())
		return -EINTR;
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
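
/*
 * Example (illustrative sketch, not part of this file): the usual allocate /
 * register / unregister / free lifecycle from a hypothetical driver's probe
 * and remove paths. example_priv, example_netdev_ops, example_probe() and
 * example_remove() are made up.
 *
 *	static int example_probe(struct device *parent)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev(sizeof(struct example_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *		dev->netdev_ops = &example_netdev_ops;
 *		SET_NETDEV_DEV(dev, parent);
 *
 *		err = register_netdev(dev);	// takes rtnl, expands "eth%d"
 *		if (err)
 *			free_netdev(dev);
 *		return err;
 *	}
 *
 *	static void example_remove(struct net_device *dev)
 *	{
 *		unregister_netdev(dev);
 *		free_netdev(dev);
 *	}
 */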
int netdev_refcnt_read(const struct net_device *dev)
{
#ifdef CONFIG_PCPU_DEV_REFCNT
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
#else
	return refcount_read(&dev->dev_refcnt);
#endif
}
EXPORT_SYMBOL(netdev_refcnt_read);
int netdev_unregister_timeout_secs __read_mostly = 10;

#define WAIT_REFS_MIN_MSECS 1
#define WAIT_REFS_MAX_MSECS 250
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int wait = 0, refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 1) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		if (!wait) {
			rcu_barrier();
			wait = WAIT_REFS_MIN_MSECS;
		} else {
			msleep(wait);
			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
		}

		refcnt = netdev_refcnt_read(dev);

		if (refcnt != 1 &&
		    time_after(jiffies, warning_time +
			       netdev_unregister_timeout_secs * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;
#ifdef CONFIG_LOCKDEP
	struct list_head unlink_list;

	list_replace_init(&net_unlink_list, &unlink_list);

	while (!list_empty(&unlink_list)) {
		struct net_device *dev = list_first_entry(&unlink_list,
							  struct net_device,
							  unlink_list);
		list_del_init(&dev->unlink_list);
		dev->nested_level = dev->lower_level - 1;
	}
#endif

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev) != 1);
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
#if IS_ENABLED(CONFIG_DECNET)
		WARN_ON(dev->dn_ptr);
#endif
		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + sizeof(*netdev_stats), 0,
	       sizeof(*stats64) - sizeof(*netdev_stats));
#else
	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
/**
 *	dev_fetch_sw_netstats - get per-cpu network device statistics
 *	@s: place to store stats
 *	@netstats: per-cpu network stats to read from
 *
 *	Read per-cpu network statistics and populate the related fields in @s.
 */
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
			   const struct pcpu_sw_netstats __percpu *netstats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(netstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}
}
EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
/**
 *	dev_get_tstats64 - ndo_get_stats64 implementation
 *	@dev: device to get statistics from
 *	@s: place to store stats
 *
 *	Populate @s from dev->stats and dev->tstats. Can be used as
 *	ndo_get_stats64() callback.
 */
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);
	dev_fetch_sw_netstats(s, dev->tstats);
}
EXPORT_SYMBOL_GPL(dev_get_tstats64);
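
/*
 * Example (illustrative sketch, not part of this file): a tunnel-style driver
 * that allocates dev->tstats and reuses dev_get_tstats64() as its
 * ndo_get_stats64 callback, bumping the per-cpu counters from its xmit path.
 * All example_* names are invented; the helpers used are real.
 *
 *	static int example_init(struct net_device *dev)
 *	{
 *		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *		return dev->tstats ? 0 : -ENOMEM;
 *	}
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		dev_sw_netstats_tx_add(dev, 1, skb->len);
 *		dev_kfree_skb(skb);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_init	 = example_init,
 *		.ndo_start_xmit	 = example_xmit,
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};
 */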
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	unsigned int alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

#ifdef CONFIG_PCPU_DEV_REFCNT
	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;
	dev_hold(dev);
#else
	refcount_set(&dev->dev_refcnt, 1);
#endif

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->upper_level = 1;
	dev->lower_level = 1;
#ifdef CONFIG_LOCKDEP
	dev->nested_level = 0;
	INIT_LIST_HEAD(&dev->unlink_list);
#endif

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
	INIT_LIST_HEAD(&dev->net_notifier_list);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
#ifdef CONFIG_PCPU_DEV_REFCNT
	free_percpu(dev->pcpu_refcnt);
free_dev:
#endif
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
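
/*
 * Example (illustrative sketch, not part of this file): allocating a
 * multi-queue device with private data and a setup callback. The private
 * struct, setup function and queue counts are invented; netdev_priv()
 * returns the area sized by sizeof_priv.
 *
 *	struct example_priv {
 *		void __iomem *regs;
 *	};
 *
 *	static void example_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *		dev->priv_flags |= IFF_NO_QUEUE;
 *	}
 *
 *	static struct net_device *example_alloc(void)
 *	{
 *		return alloc_netdev_mqs(sizeof(struct example_priv), "example%d",
 *					NET_NAME_ENUM, example_setup, 8, 8);
 *	}
 */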
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();

	/* When called immediately after register_netdevice() failed the unwind
	 * handling may still be dismantling the device. Handle that case by
	 * deferring the free.
	 */
	if (dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();
		dev->needs_free_netdev = true;
		return;
	}

	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

#ifdef CONFIG_PCPU_DEV_REFCNT
	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;
#endif
	free_percpu(dev->xdp_bulkq);
	dev->xdp_bulkq = NULL;

	/*  Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		unregister_netdevice_many(&single);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *  Note: As most callers use a stack allocated list_head,
 *  we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	if (list_empty(head))
		return;

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		dev_xdp_uninstall(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		netdev_name_node_alt_flush(dev);
		netdev_name_node_free(dev->name_node);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		dev_put(dev);
		net_set_todo(dev);
	}

	list_del(head);
}
EXPORT_SYMBOL(unregister_netdevice_many);
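
/*
 * Example (illustrative sketch, not part of this file): batching several
 * unregistrations on one list so the expensive synchronize_net()/refcount
 * waiting in unregister_netdevice_many() is paid once. example_remove_all()
 * and example_devs[] are made up; rtnl must be held by the caller.
 *
 *	static void example_remove_all(struct net_device **example_devs, int n)
 *	{
 *		LIST_HEAD(kill_list);
 *		int i;
 *
 *		ASSERT_RTNL();
 *		for (i = 0; i < n; i++)
 *			unregister_netdevice_queue(example_devs[i], &kill_list);
 *		unregister_netdevice_many(&kill_list);
 *	}
 */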
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
/**
 *	__dev_change_net_namespace - move device to different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *	@new_ifindex: If not zero, specifies device index in the target
 *	              namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
			       const char *pat, int new_ifindex)
{
	struct net *net_old = dev_net(dev);
	int err, new_nsid;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing todo */
	err = 0;
	if (net_eq(net_old, net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_get_valid_name(net, dev, pat);
		if (err < 0)
			goto out;
	}

	/* Check that new_ifindex isn't used yet. */
	err = -EBUSY;
	if (new_ifindex && __dev_get_by_index(net, new_ifindex))
		goto out;

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
	/* If there is an ifindex conflict assign a new one */
	if (!new_ifindex) {
		if (__dev_get_by_index(net, dev->ifindex))
			new_ifindex = dev_new_index(net);
		else
			new_ifindex = dev->ifindex;
	}

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Move per-net netdevice notifiers that are following the netdevice */
	move_netdevice_notifiers_dev_net(dev, net);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Adapt owner in case owning user namespace of target network
	 * namespace is different from the original one.
	 */
	err = netdev_change_owner(dev, net_old, net);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
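
/*
 * Example (illustrative sketch, not part of this file): a bonding/team-style
 * master recomputing its feature set from its lower devices, roughly in the
 * spirit of bond_compute_features(). The iteration helpers are real, but
 * example_recompute() and the starting value are chosen only for the sketch.
 *
 *	static void example_recompute(struct net_device *master)
 *	{
 *		netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *		struct net_device *lower;
 *		struct list_head *iter;
 *
 *		netdev_for_each_lower_dev(master, lower, iter)
 *			features = netdev_increment_features(features,
 *							     lower->features,
 *							     master->hw_features);
 *		master->features = features;
 *		netdev_change_features(master);
 *	}
 */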
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * sizeof_field(struct napi_struct, gro_bitmask));

	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
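
/*
 * Example (illustrative sketch, not part of this file): the helpers generated
 * above are used like dev_err()/dev_info(), but prefix messages with the
 * driver, bus and interface name. example_open() is an invented callback.
 *
 *	static int example_open(struct net_device *dev)
 *	{
 *		if (!netif_carrier_ok(dev))
 *			netdev_warn(dev, "link is down at open\n");
 *		netdev_info(dev, "opened with %u TX queues\n",
 *			    dev->real_num_tx_queues);
 *		return 0;
 *	}
 */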
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (__dev_get_by_name(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
		sd->cpu = i;
#endif

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special if any other network devices
	 * is present in a network namespace the loopback device must
	 * be present. Since we now dynamically allocate and free the
	 * loopback device ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices.  Ensuring the loopback devices
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);