/*
 *	NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
	spin_lock(&sd->input_pkt_queue.lock);
}

static inline void rps_unlock(struct softnet_data *sd)
{
	spin_unlock(&sd->input_pkt_queue.lock);
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}
/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: the packet
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	a user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);
/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}
static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}
void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	return raw_notifier_call_chain(&netdev_chain, val, info);
}
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return call_netdevice_notifiers_info(val, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_branch_unlikely(&netstamp_needed_key))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu)
		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
					       offset, count);

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = offset + (count - 1); count--; i--)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, cpu, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_cpu_and(cpu, cpu_online_mask, mask) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = cpu * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* We need to explicitly update tci as prevous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = cpu * num_tc + tc;

		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->cpu_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
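/*
 * Illustrative sketch (not part of the original file): a multiqueue driver
 * could pin each TX queue to the CPU it expects to service it by calling
 * the exported helper above.  The wrapper function and one-queue-per-CPU
 * layout below are hypothetical.
 *
 *	static void example_setup_xps(struct net_device *dev)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < dev->real_num_tx_queues; i++)
 *			netif_set_xps_queue(dev, cpumask_of(i), i);
 *	}
 */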
void netdev_reset_tc(struct net_device *dev)
{
	netif_reset_xps_queues_gt(dev, 0);
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	netif_reset_xps_queues(dev, offset, count);
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	netif_reset_xps_queues_gt(dev, 0);
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
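/*
 * Illustrative sketch (not part of the original file): a driver exposing
 * eight TX queues split across two traffic classes might configure the
 * tc-to-txq mapping with the exported helpers above.  The function name
 * and queue split are hypothetical.
 *
 *	static void example_setup_tc(struct net_device *dev)
 *	{
 *		netdev_set_num_tc(dev, 2);
 *		netdev_set_tc_queue(dev, 0, 4, 0);	// TC 0: queues 0..3
 *		netdev_set_tc_queue(dev, 1, 4, 4);	// TC 1: queues 4..7
 *	}
 */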
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
			netif_reset_xps_queues_gt(dev, txq);
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
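/*
 * Illustrative sketch (not part of the original file): when a device is
 * reconfigured to use fewer channels, the driver shrinks the active queue
 * counts under RTNL; stale skbs on the now-unused queues are flushed by
 * the helper above.  The variable names are hypothetical.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, new_channels);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, new_channels);
 *	rtnl_unlock();
 */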
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
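/*
 * Illustrative sketch (not part of the original file): drivers commonly
 * pair the two helpers above in their suspend/resume callbacks so the
 * stack stops handing them packets while the hardware is down.  The
 * callback names and the example_dev pointer are hypothetical.
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		netif_device_detach(example_dev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		netif_device_attach(example_dev);
 *		return 0;
 *	}
 */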
/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = dev->real_num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= qcount))
			hash -= qcount;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
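/*
 * Illustrative sketch (not part of the original file): a driver whose
 * hardware cannot checksum a particular packet can fall back to software
 * by resolving the CHECKSUM_PARTIAL state before DMA.  The helper
 * example_hw_can_csum() is hypothetical.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */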
int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb->data;
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);

/* XXX: check that highmem exists at all on the given machine. */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}
#endif
	return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	skb = validate_xmit_xfrm(skb, features, again);

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb wont be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev, again);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, skb_transport_offset(skb),
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, skb_transport_offset(skb),
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);

	if (q->flags & TCQ_F_NOLOCK) {
		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			__qdisc_drop(skb, &to_free);
			rc = NET_XMIT_DROP;
		} else {
			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
			qdisc_run(q);
		}

		if (unlikely(to_free))
			kfree_skb_list(to_free);
		return rc;
	}

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}

		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
			qdisc_run_end(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	const struct netprio_map *map;
	const struct sock *sk;
	unsigned int prioidx;

	if (skb->priority)
		return;
	map = rcu_dereference_bh(skb->dev->priomap);
	if (!map)
		return;
	sk = skb_to_full_sk(skb);
	if (!sk)
		return;

	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);

	if (prioidx < map->priomap_len)
		skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
#endif

DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
	struct tcf_result cl_res;

	if (!miniq)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	mini_qdisc_bstats_cpu_update(miniq, skb);

	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		unsigned int tci = skb->sender_cpu - 1;

		if (dev->num_tc) {
			tci *= dev->num_tc;
			tci += netdev_get_prio_tc_map(dev, skb->priority);
		}

		map = rcu_dereference(dev_maps->cpu_map[tci]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;
	bool again = false;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_at_ingress = 0;
# ifdef CONFIG_NET_EGRESS
	if (static_branch_unlikely(&egress_needed_key)) {
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
	}
# endif
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * its hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and shot the lock. It is not prone from deadlocks.
	 * Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {
			if (unlikely(__this_cpu_read(xmit_recursion) >
				     XMIT_RECURSION_LIMIT))
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev, &again);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
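/*
 * Illustrative sketch (not part of the original file): a protocol or
 * tunnel that has built an skb, set skb->dev and filled in the headers
 * simply hands it to dev_queue_xmit(); the return value may be a negative
 * errno or a positive NET_XMIT_* code, and the skb is consumed either way.
 * The stats counter below is hypothetical.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	err = dev_queue_xmit(skb);
 *	if (net_xmit_eval(err))
 *		example_stats.tx_errors++;
 */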
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	skb = validate_xmit_skb_list(skb, dev, &again);
	if (skb != orig_skb)
		goto drop;

	skb_set_queue_mapping(skb, queue_id);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}
EXPORT_SYMBOL(dev_direct_xmit);
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
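/*
 * Illustrative sketch (not part of the original file): a driver using
 * ndo_rx_flow_steer() typically walks its installed filters from a
 * periodic task and frees the ones the stack no longer needs.  The filter
 * table, its fields and the removal helper are hypothetical.
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct example_filter *f = &priv->filters[i];
 *
 *		if (f->in_use &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, i))
 *			example_remove_hw_filter(priv, f);
 *	}
 */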
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_rx_queue *rxqueue;

	rxqueue = dev->_rx;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);

			return rxqueue; /* Return first rxqueue */
		}
		rxqueue += index;
	}
	return rxqueue;
}
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
				     struct xdp_buff *xdp,
				     struct bpf_prog *xdp_prog)
{
	struct netdev_rx_queue *rxqueue;
	void *orig_data, *orig_data_end;
	u32 metalen, act = XDP_DROP;
	int hlen, off;
	u32 mac_len;

	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
	if (skb_cloned(skb))
		return XDP_PASS;

	/* XDP packets must be linear and must have sufficient headroom
	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
	 * native XDP provides, thus we need to do it here as well.
	 */
	if (skb_is_nonlinear(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
		int troom = skb->tail + skb->data_len - skb->end;

		/* In case we have to go down the path and also linearize,
		 * then lets do the pskb_expand_head() work just once here.
		 */
		if (pskb_expand_head(skb,
				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
			goto do_drop;
		if (skb_linearize(skb))
			goto do_drop;
	}

	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	hlen = skb_headlen(skb) + mac_len;
	xdp->data = skb->data - mac_len;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + hlen;
	xdp->data_hard_start = skb->data - skb_headroom(skb);
	orig_data_end = xdp->data_end;
	orig_data = xdp->data;

	rxqueue = netif_get_rxqueue(skb);
	xdp->rxq = &rxqueue->xdp_rxq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	off = xdp->data - orig_data;
	if (off > 0)
		__skb_pull(skb, off);
	else if (off < 0)
		__skb_push(skb, -off);
	skb->mac_header += off;

	/* check if bpf_xdp_adjust_tail was used. it can only "shrink"
	 * pckt.
	 */
	off = orig_data_end - xdp->data_end;
	if (off != 0)
		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);

	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
		__skb_push(skb, mac_len);
		break;
	case XDP_PASS:
		metalen = xdp->data - xdp->data_meta;
		if (metalen)
			skb_metadata_set(skb, metalen);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
	do_drop:
		kfree_skb(skb);
		break;
	}

	return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		kfree_skb(skb);
	}
}
EXPORT_SYMBOL_GPL(generic_xdp_tx);
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);

int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;
		int err;

		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect(skb->dev, skb,
							      &xdp, xdp_prog);
				if (err)
					goto out_redir;
				break;
			case XDP_TX:
				generic_xdp_tx(skb, xdp_prog);
				break;
			}
			return XDP_DROP;
		}
	}
	return XDP_PASS;
out_redir:
	kfree_skb(skb);
	return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);

	if (static_branch_unlikely(&generic_xdp_needed_key)) {
		int ret;

		preempt_disable();
		rcu_read_lock();
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		rcu_read_unlock();
		preempt_enable();

		/* Consider XDP consuming the packet a success from
		 * the netdev point of view we do not want to count
		 * this as an error.
		 */
		if (ret != XDP_PASS)
			return NET_RX_SUCCESS;
	}

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
static __latent_entropy void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(refcount_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);

			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
				__kfree_skb(skb);
			else
				__kfree_skb_defer(skb);
		}

		__kfree_skb_flush();
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock = NULL;

			head = head->next_sched;

			if (!(q->flags & TCQ_F_NOLOCK)) {
				root_lock = qdisc_lock(q);
				spin_lock(root_lock);
			}
			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
			smp_mb__before_atomic();
			clear_bit(__QDISC_STATE_SCHED, &q->state);
			qdisc_run(q);
			if (root_lock)
				spin_unlock(root_lock);
		}
	}

	xfrm_dev_backlog(sd);
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
static inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
	struct tcf_result cl_res;

	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
	if (!miniq)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_at_ingress = 1;
	mini_qdisc_bstats_cpu_update(miniq, skb);

	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
		__skb_push(skb, skb->mac_len);
		skb_do_redirect(skb);
		return NULL;
	default:
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return skb;
}
/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	if (dev->priv_flags & IFF_NO_RX_HANDLER)
		return -EINVAL;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
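/*
 * Illustrative sketch (not part of the original file): bridge/bonding-like
 * code attaches to a lower device by registering an rx_handler under RTNL.
 * The handler name and the port_dev/example_priv pointers are hypothetical.
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		// consume the frame here, or return RX_HANDLER_PASS to let
 *		// it continue up the stack
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(port_dev, example_handle_frame,
 *					 example_priv);
 */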
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_branch_unlikely(&ingress_needed_key)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
			goto drop;
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		if (!deliver_exact)
			atomic_long_inc(&skb->dev->rx_dropped);
		else
			atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}
/**
 *	netif_receive_skb_core - special purpose version of netif_receive_skb
 *	@skb: buffer to process
 *
 *	More direct receive version of netif_receive_skb().  It should
 *	only be used by callers that have a need to skip RPS and Generic XDP.
 *	Caller must also take care of handling if (page_is_)pfmemalloc.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb_core(struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __netif_receive_skb_core(skb, false);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb_core);

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_branch_dec(&generic_xdp_needed_key);
		} else if (new && !old) {
			static_branch_inc(&generic_xdp_needed_key);
			dev_disable_lro(dev);
			dev_disable_gro_hw(dev);
		}
		break;

	case XDP_QUERY_PROG:
		xdp->prog_attached = !!old;
		xdp->prog_id = old ? old->aux->id : 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	if (static_branch_unlikely(&generic_xdp_needed_key)) {
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);

		if (ret != XDP_PASS)

	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

	ret = __netif_receive_skb(skb);
/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
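
/* Illustrative sketch (editor's addition, not part of dev.c, hence "#if 0"):
 * one way a hypothetical Ethernet driver could hand a completed receive
 * buffer to the stack with netif_receive_skb(), as described in the comment
 * above.  The mydrv_* names and struct mydrv_priv are invented for
 * illustration; only skb_put(), eth_type_trans() and netif_receive_skb()
 * are real kernel APIs.  Assumed to run in NAPI poll (softirq) context
 * with interrupts enabled.
 */
#if 0
struct mydrv_priv {
	struct net_device *netdev;	/* hypothetical driver-private state */
	struct napi_struct napi;
};

static void mydrv_rx_one(struct mydrv_priv *priv, struct sk_buff *skb,
			 unsigned int len)
{
	skb_put(skb, len);			/* frame data already DMA'd into skb->data */
	skb->protocol = eth_type_trans(skb, priv->netdev);
	netif_receive_skb(skb);			/* return value usually ignored */
}
#endif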
DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();

	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);

			input_queue_head_incr(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);

			input_queue_head_incr(sd);

static void flush_all_backlogs(void)
{
	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;

	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)

		err = ptype->callbacks.gro_complete(skb, 0);

	WARN_ON(&ptype->list == head);

		return NET_RX_SUCCESS;

	return netif_receive_skb_internal(skb);
}
/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {

	for (skb = prev; skb; skb = prev) {

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)

		napi_gro_complete(skb);

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}
4938 static void skb_gro_reset_offset(struct sk_buff
*skb
)
4940 const struct skb_shared_info
*pinfo
= skb_shinfo(skb
);
4941 const skb_frag_t
*frag0
= &pinfo
->frags
[0];
4943 NAPI_GRO_CB(skb
)->data_offset
= 0;
4944 NAPI_GRO_CB(skb
)->frag0
= NULL
;
4945 NAPI_GRO_CB(skb
)->frag0_len
= 0;
4947 if (skb_mac_header(skb
) == skb_tail_pointer(skb
) &&
4949 !PageHighMem(skb_frag_page(frag0
))) {
4950 NAPI_GRO_CB(skb
)->frag0
= skb_frag_address(frag0
);
4951 NAPI_GRO_CB(skb
)->frag0_len
= min_t(unsigned int,
4952 skb_frag_size(frag0
),
4953 skb
->end
- skb
->tail
);
4957 static void gro_pull_from_frag0(struct sk_buff
*skb
, int grow
)
4959 struct skb_shared_info
*pinfo
= skb_shinfo(skb
);
4961 BUG_ON(skb
->end
- skb
->tail
< grow
);
4963 memcpy(skb_tail_pointer(skb
), NAPI_GRO_CB(skb
)->frag0
, grow
);
4965 skb
->data_len
-= grow
;
4968 pinfo
->frags
[0].page_offset
+= grow
;
4969 skb_frag_size_sub(&pinfo
->frags
[0], grow
);
4971 if (unlikely(!skb_frag_size(&pinfo
->frags
[0]))) {
4972 skb_frag_unref(skb
, 0);
4973 memmove(pinfo
->frags
, pinfo
->frags
+ 1,
4974 --pinfo
->nr_frags
* sizeof(pinfo
->frags
[0]));
4978 static enum gro_result
dev_gro_receive(struct napi_struct
*napi
, struct sk_buff
*skb
)
4980 struct sk_buff
**pp
= NULL
;
4981 struct packet_offload
*ptype
;
4982 __be16 type
= skb
->protocol
;
4983 struct list_head
*head
= &offload_base
;
4985 enum gro_result ret
;
4988 if (netif_elide_gro(skb
->dev
))
4991 gro_list_prepare(napi
, skb
);
4994 list_for_each_entry_rcu(ptype
, head
, list
) {
4995 if (ptype
->type
!= type
|| !ptype
->callbacks
.gro_receive
)
4998 skb_set_network_header(skb
, skb_gro_offset(skb
));
4999 skb_reset_mac_len(skb
);
5000 NAPI_GRO_CB(skb
)->same_flow
= 0;
5001 NAPI_GRO_CB(skb
)->flush
= skb_is_gso(skb
) || skb_has_frag_list(skb
);
5002 NAPI_GRO_CB(skb
)->free
= 0;
5003 NAPI_GRO_CB(skb
)->encap_mark
= 0;
5004 NAPI_GRO_CB(skb
)->recursion_counter
= 0;
5005 NAPI_GRO_CB(skb
)->is_fou
= 0;
5006 NAPI_GRO_CB(skb
)->is_atomic
= 1;
5007 NAPI_GRO_CB(skb
)->gro_remcsum_start
= 0;
5009 /* Setup for GRO checksum validation */
5010 switch (skb
->ip_summed
) {
5011 case CHECKSUM_COMPLETE
:
5012 NAPI_GRO_CB(skb
)->csum
= skb
->csum
;
5013 NAPI_GRO_CB(skb
)->csum_valid
= 1;
5014 NAPI_GRO_CB(skb
)->csum_cnt
= 0;
5016 case CHECKSUM_UNNECESSARY
:
5017 NAPI_GRO_CB(skb
)->csum_cnt
= skb
->csum_level
+ 1;
5018 NAPI_GRO_CB(skb
)->csum_valid
= 0;
5021 NAPI_GRO_CB(skb
)->csum_cnt
= 0;
5022 NAPI_GRO_CB(skb
)->csum_valid
= 0;
5025 pp
= ptype
->callbacks
.gro_receive(&napi
->gro_list
, skb
);
5030 if (&ptype
->list
== head
)
5033 if (IS_ERR(pp
) && PTR_ERR(pp
) == -EINPROGRESS
) {
5038 same_flow
= NAPI_GRO_CB(skb
)->same_flow
;
5039 ret
= NAPI_GRO_CB(skb
)->free
? GRO_MERGED_FREE
: GRO_MERGED
;
5042 struct sk_buff
*nskb
= *pp
;
5046 napi_gro_complete(nskb
);
5053 if (NAPI_GRO_CB(skb
)->flush
)
5056 if (unlikely(napi
->gro_count
>= MAX_GRO_SKBS
)) {
5057 struct sk_buff
*nskb
= napi
->gro_list
;
5059 /* locate the end of the list to select the 'oldest' flow */
5060 while (nskb
->next
) {
5066 napi_gro_complete(nskb
);
5070 NAPI_GRO_CB(skb
)->count
= 1;
5071 NAPI_GRO_CB(skb
)->age
= jiffies
;
5072 NAPI_GRO_CB(skb
)->last
= skb
;
5073 skb_shinfo(skb
)->gso_size
= skb_gro_len(skb
);
5074 skb
->next
= napi
->gro_list
;
5075 napi
->gro_list
= skb
;
5079 grow
= skb_gro_offset(skb
) - skb_headlen(skb
);
5081 gro_pull_from_frag0(skb
, grow
);
5090 struct packet_offload
*gro_find_receive_by_type(__be16 type
)
5092 struct list_head
*offload_head
= &offload_base
;
5093 struct packet_offload
*ptype
;
5095 list_for_each_entry_rcu(ptype
, offload_head
, list
) {
5096 if (ptype
->type
!= type
|| !ptype
->callbacks
.gro_receive
)
5102 EXPORT_SYMBOL(gro_find_receive_by_type
);
5104 struct packet_offload
*gro_find_complete_by_type(__be16 type
)
5106 struct list_head
*offload_head
= &offload_base
;
5107 struct packet_offload
*ptype
;
5109 list_for_each_entry_rcu(ptype
, offload_head
, list
) {
5110 if (ptype
->type
!= type
|| !ptype
->callbacks
.gro_complete
)
5116 EXPORT_SYMBOL(gro_find_complete_by_type
);
5118 static void napi_skb_free_stolen_head(struct sk_buff
*skb
)
5122 kmem_cache_free(skbuff_head_cache
, skb
);
5125 static gro_result_t
napi_skb_finish(gro_result_t ret
, struct sk_buff
*skb
)
5129 if (netif_receive_skb_internal(skb
))
5137 case GRO_MERGED_FREE
:
5138 if (NAPI_GRO_CB(skb
)->free
== NAPI_GRO_FREE_STOLEN_HEAD
)
5139 napi_skb_free_stolen_head(skb
);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
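
/* Illustrative sketch (editor's addition, not compiled): a minimal NAPI poll
 * handler feeding packets through napi_gro_receive() and completing with
 * napi_complete_done(), reusing the hypothetical struct mydrv_priv from the
 * earlier sketch.  mydrv_next_rx_skb() and mydrv_enable_rx_irq() are invented
 * placeholders for driver-specific ring handling and IRQ unmasking.
 */
#if 0
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = mydrv_next_rx_skb(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->netdev);
		napi_gro_receive(napi, skb);	/* GRO entry point shown above */
		done++;
	}

	/* Only re-enable device interrupts once NAPI really completed. */
	if (done < budget && napi_complete_done(napi, done))
		mydrv_enable_rx_irq(priv);

	return done;
}
#endif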
5164 static void napi_reuse_skb(struct napi_struct
*napi
, struct sk_buff
*skb
)
5166 if (unlikely(skb
->pfmemalloc
)) {
5170 __skb_pull(skb
, skb_headlen(skb
));
5171 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
5172 skb_reserve(skb
, NET_SKB_PAD
+ NET_IP_ALIGN
- skb_headroom(skb
));
5174 skb
->dev
= napi
->dev
;
5176 skb
->encapsulation
= 0;
5177 skb_shinfo(skb
)->gso_type
= 0;
5178 skb
->truesize
= SKB_TRUESIZE(skb_end_offset(skb
));
5184 struct sk_buff
*napi_get_frags(struct napi_struct
*napi
)
5186 struct sk_buff
*skb
= napi
->skb
;
5189 skb
= napi_alloc_skb(napi
, GRO_MAX_HEAD
);
5192 skb_mark_napi_id(skb
, napi
);
5197 EXPORT_SYMBOL(napi_get_frags
);
5199 static gro_result_t
napi_frags_finish(struct napi_struct
*napi
,
5200 struct sk_buff
*skb
,
5206 __skb_push(skb
, ETH_HLEN
);
5207 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
5208 if (ret
== GRO_NORMAL
&& netif_receive_skb_internal(skb
))
5213 napi_reuse_skb(napi
, skb
);
5216 case GRO_MERGED_FREE
:
5217 if (NAPI_GRO_CB(skb
)->free
== NAPI_GRO_FREE_STOLEN_HEAD
)
5218 napi_skb_free_stolen_head(skb
);
5220 napi_reuse_skb(napi
, skb
);
5231 /* Upper GRO stack assumes network header starts at gro_offset=0
5232 * Drivers could call both napi_gro_frags() and napi_gro_receive()
5233 * We copy ethernet header into skb->data to have a common layout.
5235 static struct sk_buff
*napi_frags_skb(struct napi_struct
*napi
)
5237 struct sk_buff
*skb
= napi
->skb
;
5238 const struct ethhdr
*eth
;
5239 unsigned int hlen
= sizeof(*eth
);
5243 skb_reset_mac_header(skb
);
5244 skb_gro_reset_offset(skb
);
5246 eth
= skb_gro_header_fast(skb
, 0);
5247 if (unlikely(skb_gro_header_hard(skb
, hlen
))) {
5248 eth
= skb_gro_header_slow(skb
, hlen
, 0);
5249 if (unlikely(!eth
)) {
5250 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5251 __func__
, napi
->dev
->name
);
5252 napi_reuse_skb(napi
, skb
);
5256 gro_pull_from_frag0(skb
, hlen
);
5257 NAPI_GRO_CB(skb
)->frag0
+= hlen
;
5258 NAPI_GRO_CB(skb
)->frag0_len
-= hlen
;
5260 __skb_pull(skb
, hlen
);
5263 * This works because the only protocols we care about don't require
5265 * We'll fix it up properly in napi_frags_finish()
5267 skb
->protocol
= eth
->h_proto
;
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
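
/* Illustrative sketch (editor's addition, not compiled): the napi_get_frags()/
 * napi_gro_frags() pattern for a hypothetical driver that receives into page
 * fragments rather than linear buffers.  mydrv_* names are invented; the
 * Ethernet-header pull and protocol setup are handled by napi_frags_skb()
 * and napi_frags_finish() above.
 */
#if 0
static void mydrv_rx_page(struct mydrv_priv *priv, struct page *page,
			  unsigned int offset, unsigned int len)
{
	struct napi_struct *napi = &priv->napi;
	struct sk_buff *skb = napi_get_frags(napi);	/* may reuse napi->skb */

	if (unlikely(!skb))
		return;					/* caller drops the fragment */

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);	/* consumes napi->skb and runs GRO on it */
}
#endif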
5285 /* Compute the checksum from gro_offset and return the folded value
5286 * after adding in any pseudo checksum.
5288 __sum16
__skb_gro_checksum_complete(struct sk_buff
*skb
)
5293 wsum
= skb_checksum(skb
, skb_gro_offset(skb
), skb_gro_len(skb
), 0);
5295 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5296 sum
= csum_fold(csum_add(NAPI_GRO_CB(skb
)->csum
, wsum
));
5298 if (unlikely(skb
->ip_summed
== CHECKSUM_COMPLETE
) &&
5299 !skb
->csum_complete_sw
)
5300 netdev_rx_csum_fault(skb
->dev
);
5303 NAPI_GRO_CB(skb
)->csum
= wsum
;
5304 NAPI_GRO_CB(skb
)->csum_valid
= 1;
5308 EXPORT_SYMBOL(__skb_gro_checksum_complete
);
5310 static void net_rps_send_ipi(struct softnet_data
*remsd
)
5314 struct softnet_data
*next
= remsd
->rps_ipi_next
;
5316 if (cpu_online(remsd
->cpu
))
5317 smp_call_function_single_async(remsd
->cpu
, &remsd
->csd
);
5324 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
5325 * Note: called with local irq disabled, but exits with local irq enabled.
5327 static void net_rps_action_and_irq_enable(struct softnet_data
*sd
)
5330 struct softnet_data
*remsd
= sd
->rps_ipi_list
;
5333 sd
->rps_ipi_list
= NULL
;
5337 /* Send pending IPI's to kick RPS processing on remote cpus. */
5338 net_rps_send_ipi(remsd
);
5344 static bool sd_has_rps_ipi_waiting(struct softnet_data
*sd
)
5347 return sd
->rps_ipi_list
!= NULL
;
5353 static int process_backlog(struct napi_struct
*napi
, int quota
)
5355 struct softnet_data
*sd
= container_of(napi
, struct softnet_data
, backlog
);
5359 /* Check if we have pending ipi, its better to send them now,
5360 * not waiting net_rx_action() end.
5362 if (sd_has_rps_ipi_waiting(sd
)) {
5363 local_irq_disable();
5364 net_rps_action_and_irq_enable(sd
);
5367 napi
->weight
= dev_rx_weight
;
5369 struct sk_buff
*skb
;
5371 while ((skb
= __skb_dequeue(&sd
->process_queue
))) {
5373 __netif_receive_skb(skb
);
5375 input_queue_head_incr(sd
);
5376 if (++work
>= quota
)
5381 local_irq_disable();
5383 if (skb_queue_empty(&sd
->input_pkt_queue
)) {
5385 * Inline a custom version of __napi_complete().
5386 * only current cpu owns and manipulates this napi,
5387 * and NAPI_STATE_SCHED is the only possible flag set
5389 * We can use a plain write instead of clear_bit(),
5390 * and we dont need an smp_mb() memory barrier.
5395 skb_queue_splice_tail_init(&sd
->input_pkt_queue
,
5396 &sd
->process_queue
);
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * napi_schedule_prep - check if napi can be scheduled
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure that only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))

		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
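
/* Illustrative sketch (editor's addition, not compiled): the usual hard-IRQ
 * half of a NAPI driver, using napi_schedule_prep()/__napi_schedule_irqoff()
 * as the comments above describe.  mydrv_disable_rx_irq() is an invented
 * placeholder for masking the device's RX interrupt; struct mydrv_priv is the
 * hypothetical private structure from the earlier sketches.
 */
#if 0
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_disable_rx_irq(priv);
		/* hard irqs are masked here, so the _irqoff variant is safe */
		__napi_schedule_irqoff(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif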
bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case it's running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))

		unsigned long timeout = 0;

			timeout = n->dev->gro_flush_timeout;

			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);

		napi_gro_flush(n, false);

	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {

EXPORT_SYMBOL(napi_complete_done);
5524 /* must be called under rcu_read_lock(), as we dont take a reference */
5525 static struct napi_struct
*napi_by_id(unsigned int napi_id
)
5527 unsigned int hash
= napi_id
% HASH_SIZE(napi_hash
);
5528 struct napi_struct
*napi
;
5530 hlist_for_each_entry_rcu(napi
, &napi_hash
[hash
], napi_hash_node
)
5531 if (napi
->napi_id
== napi_id
)
5537 #if defined(CONFIG_NET_RX_BUSY_POLL)
5539 #define BUSY_POLL_BUDGET 8
5541 static void busy_poll_stop(struct napi_struct
*napi
, void *have_poll_lock
)
5545 /* Busy polling means there is a high chance device driver hard irq
5546 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5547 * set in napi_schedule_prep().
5548 * Since we are about to call napi->poll() once more, we can safely
5549 * clear NAPI_STATE_MISSED.
5551 * Note: x86 could use a single "lock and ..." instruction
5552 * to perform these two clear_bit()
5554 clear_bit(NAPI_STATE_MISSED
, &napi
->state
);
5555 clear_bit(NAPI_STATE_IN_BUSY_POLL
, &napi
->state
);
5559 /* All we really want here is to re-enable device interrupts.
5560 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5562 rc
= napi
->poll(napi
, BUSY_POLL_BUDGET
);
5563 trace_napi_poll(napi
, rc
, BUSY_POLL_BUDGET
);
5564 netpoll_poll_unlock(have_poll_lock
);
5565 if (rc
== BUSY_POLL_BUDGET
)
5566 __napi_schedule(napi
);
5570 void napi_busy_loop(unsigned int napi_id
,
5571 bool (*loop_end
)(void *, unsigned long),
5574 unsigned long start_time
= loop_end
? busy_loop_current_time() : 0;
5575 int (*napi_poll
)(struct napi_struct
*napi
, int budget
);
5576 void *have_poll_lock
= NULL
;
5577 struct napi_struct
*napi
;
5584 napi
= napi_by_id(napi_id
);
5594 unsigned long val
= READ_ONCE(napi
->state
);
5596 /* If multiple threads are competing for this napi,
5597 * we avoid dirtying napi->state as much as we can.
5599 if (val
& (NAPIF_STATE_DISABLE
| NAPIF_STATE_SCHED
|
5600 NAPIF_STATE_IN_BUSY_POLL
))
5602 if (cmpxchg(&napi
->state
, val
,
5603 val
| NAPIF_STATE_IN_BUSY_POLL
|
5604 NAPIF_STATE_SCHED
) != val
)
5606 have_poll_lock
= netpoll_poll_lock(napi
);
5607 napi_poll
= napi
->poll
;
5609 work
= napi_poll(napi
, BUSY_POLL_BUDGET
);
5610 trace_napi_poll(napi
, work
, BUSY_POLL_BUDGET
);
5613 __NET_ADD_STATS(dev_net(napi
->dev
),
5614 LINUX_MIB_BUSYPOLLRXPACKETS
, work
);
5617 if (!loop_end
|| loop_end(loop_end_arg
, start_time
))
5620 if (unlikely(need_resched())) {
5622 busy_poll_stop(napi
, have_poll_lock
);
5626 if (loop_end(loop_end_arg
, start_time
))
5633 busy_poll_stop(napi
, have_poll_lock
);
5638 EXPORT_SYMBOL(napi_busy_loop
);
5640 #endif /* CONFIG_NET_RX_BUSY_POLL */
5642 static void napi_hash_add(struct napi_struct
*napi
)
5644 if (test_bit(NAPI_STATE_NO_BUSY_POLL
, &napi
->state
) ||
5645 test_and_set_bit(NAPI_STATE_HASHED
, &napi
->state
))
5648 spin_lock(&napi_hash_lock
);
5650 /* 0..NR_CPUS range is reserved for sender_cpu use */
5652 if (unlikely(++napi_gen_id
< MIN_NAPI_ID
))
5653 napi_gen_id
= MIN_NAPI_ID
;
5654 } while (napi_by_id(napi_gen_id
));
5655 napi
->napi_id
= napi_gen_id
;
5657 hlist_add_head_rcu(&napi
->napi_hash_node
,
5658 &napi_hash
[napi
->napi_id
% HASH_SIZE(napi_hash
)]);
5660 spin_unlock(&napi_hash_lock
);
5663 /* Warning : caller is responsible to make sure rcu grace period
5664 * is respected before freeing memory containing @napi
5666 bool napi_hash_del(struct napi_struct
*napi
)
5668 bool rcu_sync_needed
= false;
5670 spin_lock(&napi_hash_lock
);
5672 if (test_and_clear_bit(NAPI_STATE_HASHED
, &napi
->state
)) {
5673 rcu_sync_needed
= true;
5674 hlist_del_rcu(&napi
->napi_hash_node
);
5676 spin_unlock(&napi_hash_lock
);
5677 return rcu_sync_needed
;
5679 EXPORT_SYMBOL_GPL(napi_hash_del
);
5681 static enum hrtimer_restart
napi_watchdog(struct hrtimer
*timer
)
5683 struct napi_struct
*napi
;
5685 napi
= container_of(timer
, struct napi_struct
, timer
);
5687 /* Note : we use a relaxed variant of napi_schedule_prep() not setting
5688 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
5690 if (napi
->gro_list
&& !napi_disable_pending(napi
) &&
5691 !test_and_set_bit(NAPI_STATE_SCHED
, &napi
->state
))
5692 __napi_schedule_irqoff(napi
);
5694 return HRTIMER_NORESTART
;
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;

	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",

	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
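
/* Illustrative sketch (editor's addition, not compiled): registering the poll
 * handler from a hypothetical probe path.  NAPI_POLL_WEIGHT is the normal
 * weight; netif_napi_add() above warns once about anything larger.  mydrv_*
 * names and mydrv_poll() refer to the earlier sketches.
 */
#if 0
static void mydrv_init_napi(struct mydrv_priv *priv)
{
	netif_napi_add(priv->netdev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);	/* usually done from ndo_open */
}
#endif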
void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))

	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	if (napi_hash_del(napi))

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
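
/* Illustrative sketch (editor's addition, not compiled): the matching
 * teardown for the setup sketch above, run from process context as required
 * by netif_napi_del().  mydrv_fini_napi() is an invented name.
 */
#if 0
static void mydrv_fini_napi(struct mydrv_priv *priv)
{
	napi_disable(&priv->napi);	/* waits for a running poll to finish */
	netif_napi_del(&priv->napi);
}
#endif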
5752 static int napi_poll(struct napi_struct
*n
, struct list_head
*repoll
)
5757 list_del_init(&n
->poll_list
);
5759 have
= netpoll_poll_lock(n
);
5763 /* This NAPI_STATE_SCHED test is for avoiding a race
5764 * with netpoll's poll_napi(). Only the entity which
5765 * obtains the lock and sees NAPI_STATE_SCHED set will
5766 * actually make the ->poll() call. Therefore we avoid
5767 * accidentally calling ->poll() when NAPI is not scheduled.
5770 if (test_bit(NAPI_STATE_SCHED
, &n
->state
)) {
5771 work
= n
->poll(n
, weight
);
5772 trace_napi_poll(n
, work
, weight
);
5775 WARN_ON_ONCE(work
> weight
);
5777 if (likely(work
< weight
))
5780 /* Drivers must not modify the NAPI state if they
5781 * consume the entire weight. In such cases this code
5782 * still "owns" the NAPI instance and therefore can
5783 * move the instance around on the list at-will.
5785 if (unlikely(napi_disable_pending(n
))) {
5791 /* flush too old packets
5792 * If HZ < 1000, flush all packets.
5794 napi_gro_flush(n
, HZ
>= 1000);
5797 /* Some drivers may have called napi_schedule
5798 * prior to exhausting their budget.
5800 if (unlikely(!list_empty(&n
->poll_list
))) {
5801 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5802 n
->dev
? n
->dev
->name
: "backlog");
5806 list_add_tail(&n
->poll_list
, repoll
);
5809 netpoll_poll_unlock(have
);
5814 static __latent_entropy
void net_rx_action(struct softirq_action
*h
)
5816 struct softnet_data
*sd
= this_cpu_ptr(&softnet_data
);
5817 unsigned long time_limit
= jiffies
+
5818 usecs_to_jiffies(netdev_budget_usecs
);
5819 int budget
= netdev_budget
;
5823 local_irq_disable();
5824 list_splice_init(&sd
->poll_list
, &list
);
5828 struct napi_struct
*n
;
5830 if (list_empty(&list
)) {
5831 if (!sd_has_rps_ipi_waiting(sd
) && list_empty(&repoll
))
5836 n
= list_first_entry(&list
, struct napi_struct
, poll_list
);
5837 budget
-= napi_poll(n
, &repoll
);
5839 /* If softirq window is exhausted then punt.
5840 * Allow this to run for 2 jiffies since which will allow
5841 * an average latency of 1.5/HZ.
5843 if (unlikely(budget
<= 0 ||
5844 time_after_eq(jiffies
, time_limit
))) {
5850 local_irq_disable();
5852 list_splice_tail_init(&sd
->poll_list
, &list
);
5853 list_splice_tail(&repoll
, &list
);
5854 list_splice(&list
, &sd
->poll_list
);
5855 if (!list_empty(&sd
->poll_list
))
5856 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
5858 net_rps_action_and_irq_enable(sd
);
5860 __kfree_skb_flush();
5863 struct netdev_adjacent
{
5864 struct net_device
*dev
;
5866 /* upper master flag, there can only be one master device per list */
5869 /* counter for the number of times this device was added to us */
5872 /* private field for the users */
5875 struct list_head list
;
5876 struct rcu_head rcu
;
5879 static struct netdev_adjacent
*__netdev_find_adj(struct net_device
*adj_dev
,
5880 struct list_head
*adj_list
)
5882 struct netdev_adjacent
*adj
;
5884 list_for_each_entry(adj
, adj_list
, list
) {
5885 if (adj
->dev
== adj_dev
)
5891 static int __netdev_has_upper_dev(struct net_device
*upper_dev
, void *data
)
5893 struct net_device
*dev
= data
;
5895 return upper_dev
== dev
;
5899 * netdev_has_upper_dev - Check if device is linked to an upper device
5901 * @upper_dev: upper device to check
5903 * Find out if a device is linked to specified upper device and return true
5904 * in case it is. Note that this checks only immediate upper device,
5905 * not through a complete stack of devices. The caller must hold the RTNL lock.
5907 bool netdev_has_upper_dev(struct net_device
*dev
,
5908 struct net_device
*upper_dev
)
5912 return netdev_walk_all_upper_dev_rcu(dev
, __netdev_has_upper_dev
,
5915 EXPORT_SYMBOL(netdev_has_upper_dev
);
5918 * netdev_has_upper_dev_all - Check if device is linked to an upper device
5920 * @upper_dev: upper device to check
5922 * Find out if a device is linked to specified upper device and return true
5923 * in case it is. Note that this checks the entire upper device chain.
5924 * The caller must hold rcu lock.
5927 bool netdev_has_upper_dev_all_rcu(struct net_device
*dev
,
5928 struct net_device
*upper_dev
)
5930 return !!netdev_walk_all_upper_dev_rcu(dev
, __netdev_has_upper_dev
,
5933 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu
);
5936 * netdev_has_any_upper_dev - Check if device is linked to some device
5939 * Find out if a device is linked to an upper device and return true in case
5940 * it is. The caller must hold the RTNL lock.
5942 bool netdev_has_any_upper_dev(struct net_device
*dev
)
5946 return !list_empty(&dev
->adj_list
.upper
);
5948 EXPORT_SYMBOL(netdev_has_any_upper_dev
);
5951 * netdev_master_upper_dev_get - Get master upper device
5954 * Find a master upper device and return pointer to it or NULL in case
5955 * it's not there. The caller must hold the RTNL lock.
5957 struct net_device
*netdev_master_upper_dev_get(struct net_device
*dev
)
5959 struct netdev_adjacent
*upper
;
5963 if (list_empty(&dev
->adj_list
.upper
))
5966 upper
= list_first_entry(&dev
->adj_list
.upper
,
5967 struct netdev_adjacent
, list
);
5968 if (likely(upper
->master
))
5972 EXPORT_SYMBOL(netdev_master_upper_dev_get
);
5975 * netdev_has_any_lower_dev - Check if device is linked to some device
5978 * Find out if a device is linked to a lower device and return true in case
5979 * it is. The caller must hold the RTNL lock.
5981 static bool netdev_has_any_lower_dev(struct net_device
*dev
)
5985 return !list_empty(&dev
->adj_list
.lower
);
5988 void *netdev_adjacent_get_private(struct list_head
*adj_list
)
5990 struct netdev_adjacent
*adj
;
5992 adj
= list_entry(adj_list
, struct netdev_adjacent
, list
);
5994 return adj
->private;
5996 EXPORT_SYMBOL(netdev_adjacent_get_private
);
/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)

	*iter = &upper->list;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
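
/* Illustrative sketch (editor's addition, not compiled): walking the
 * immediate upper devices with the netdev_for_each_upper_dev_rcu() helper
 * that is built on netdev_upper_get_next_dev_rcu().  mydrv_dump_uppers() is
 * an invented name; the RCU read lock satisfies the requirement noted above.
 */
#if 0
static void mydrv_dump_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		pr_debug("%s: upper device %s\n", dev->name, upper->name);
	rcu_read_unlock();
}
#endif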
6024 static struct net_device
*netdev_next_upper_dev_rcu(struct net_device
*dev
,
6025 struct list_head
**iter
)
6027 struct netdev_adjacent
*upper
;
6029 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6031 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
6033 if (&upper
->list
== &dev
->adj_list
.upper
)
6036 *iter
= &upper
->list
;
6041 int netdev_walk_all_upper_dev_rcu(struct net_device
*dev
,
6042 int (*fn
)(struct net_device
*dev
,
6046 struct net_device
*udev
;
6047 struct list_head
*iter
;
6050 for (iter
= &dev
->adj_list
.upper
,
6051 udev
= netdev_next_upper_dev_rcu(dev
, &iter
);
6053 udev
= netdev_next_upper_dev_rcu(dev
, &iter
)) {
6054 /* first is the upper device itself */
6055 ret
= fn(udev
, data
);
6059 /* then look at all of its upper devices */
6060 ret
= netdev_walk_all_upper_dev_rcu(udev
, fn
, data
);
6067 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu
);
6070 * netdev_lower_get_next_private - Get the next ->private from the
6071 * lower neighbour list
6073 * @iter: list_head ** of the current position
6075 * Gets the next netdev_adjacent->private from the dev's lower neighbour
6076 * list, starting from iter position. The caller must hold either hold the
6077 * RTNL lock or its own locking that guarantees that the neighbour lower
6078 * list will remain unchanged.
6080 void *netdev_lower_get_next_private(struct net_device
*dev
,
6081 struct list_head
**iter
)
6083 struct netdev_adjacent
*lower
;
6085 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
6087 if (&lower
->list
== &dev
->adj_list
.lower
)
6090 *iter
= lower
->list
.next
;
6092 return lower
->private;
6094 EXPORT_SYMBOL(netdev_lower_get_next_private
);
6097 * netdev_lower_get_next_private_rcu - Get the next ->private from the
6098 * lower neighbour list, RCU
6101 * @iter: list_head ** of the current position
6103 * Gets the next netdev_adjacent->private from the dev's lower neighbour
6104 * list, starting from iter position. The caller must hold RCU read lock.
6106 void *netdev_lower_get_next_private_rcu(struct net_device
*dev
,
6107 struct list_head
**iter
)
6109 struct netdev_adjacent
*lower
;
6111 WARN_ON_ONCE(!rcu_read_lock_held());
6113 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
6115 if (&lower
->list
== &dev
->adj_list
.lower
)
6118 *iter
= &lower
->list
;
6120 return lower
->private;
6122 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu
);
6125 * netdev_lower_get_next - Get the next device from the lower neighbour
6128 * @iter: list_head ** of the current position
6130 * Gets the next netdev_adjacent from the dev's lower neighbour
6131 * list, starting from iter position. The caller must hold RTNL lock or
6132 * its own locking that guarantees that the neighbour lower
6133 * list will remain unchanged.
6135 void *netdev_lower_get_next(struct net_device
*dev
, struct list_head
**iter
)
6137 struct netdev_adjacent
*lower
;
6139 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
6141 if (&lower
->list
== &dev
->adj_list
.lower
)
6144 *iter
= lower
->list
.next
;
6148 EXPORT_SYMBOL(netdev_lower_get_next
);
6150 static struct net_device
*netdev_next_lower_dev(struct net_device
*dev
,
6151 struct list_head
**iter
)
6153 struct netdev_adjacent
*lower
;
6155 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
6157 if (&lower
->list
== &dev
->adj_list
.lower
)
6160 *iter
= &lower
->list
;
6165 int netdev_walk_all_lower_dev(struct net_device
*dev
,
6166 int (*fn
)(struct net_device
*dev
,
6170 struct net_device
*ldev
;
6171 struct list_head
*iter
;
6174 for (iter
= &dev
->adj_list
.lower
,
6175 ldev
= netdev_next_lower_dev(dev
, &iter
);
6177 ldev
= netdev_next_lower_dev(dev
, &iter
)) {
6178 /* first is the lower device itself */
6179 ret
= fn(ldev
, data
);
6183 /* then look at all of its lower devices */
6184 ret
= netdev_walk_all_lower_dev(ldev
, fn
, data
);
6191 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev
);
6193 static struct net_device
*netdev_next_lower_dev_rcu(struct net_device
*dev
,
6194 struct list_head
**iter
)
6196 struct netdev_adjacent
*lower
;
6198 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
6199 if (&lower
->list
== &dev
->adj_list
.lower
)
6202 *iter
= &lower
->list
;
6207 int netdev_walk_all_lower_dev_rcu(struct net_device
*dev
,
6208 int (*fn
)(struct net_device
*dev
,
6212 struct net_device
*ldev
;
6213 struct list_head
*iter
;
6216 for (iter
= &dev
->adj_list
.lower
,
6217 ldev
= netdev_next_lower_dev_rcu(dev
, &iter
);
6219 ldev
= netdev_next_lower_dev_rcu(dev
, &iter
)) {
6220 /* first is the lower device itself */
6221 ret
= fn(ldev
, data
);
6225 /* then look at all of its lower devices */
6226 ret
= netdev_walk_all_lower_dev_rcu(ldev
, fn
, data
);
6233 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu
);
6236 * netdev_lower_get_first_private_rcu - Get the first ->private from the
6237 * lower neighbour list, RCU
6241 * Gets the first netdev_adjacent->private from the dev's lower neighbour
6242 * list. The caller must hold RCU read lock.
6244 void *netdev_lower_get_first_private_rcu(struct net_device
*dev
)
6246 struct netdev_adjacent
*lower
;
6248 lower
= list_first_or_null_rcu(&dev
->adj_list
.lower
,
6249 struct netdev_adjacent
, list
);
6251 return lower
->private;
6254 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu
);
6257 * netdev_master_upper_dev_get_rcu - Get master upper device
6260 * Find a master upper device and return pointer to it or NULL in case
6261 * it's not there. The caller must hold the RCU read lock.
6263 struct net_device
*netdev_master_upper_dev_get_rcu(struct net_device
*dev
)
6265 struct netdev_adjacent
*upper
;
6267 upper
= list_first_or_null_rcu(&dev
->adj_list
.upper
,
6268 struct netdev_adjacent
, list
);
6269 if (upper
&& likely(upper
->master
))
6273 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu
);
6275 static int netdev_adjacent_sysfs_add(struct net_device
*dev
,
6276 struct net_device
*adj_dev
,
6277 struct list_head
*dev_list
)
6279 char linkname
[IFNAMSIZ
+7];
6281 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
6282 "upper_%s" : "lower_%s", adj_dev
->name
);
6283 return sysfs_create_link(&(dev
->dev
.kobj
), &(adj_dev
->dev
.kobj
),
6286 static void netdev_adjacent_sysfs_del(struct net_device
*dev
,
6288 struct list_head
*dev_list
)
6290 char linkname
[IFNAMSIZ
+7];
6292 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
6293 "upper_%s" : "lower_%s", name
);
6294 sysfs_remove_link(&(dev
->dev
.kobj
), linkname
);
6297 static inline bool netdev_adjacent_is_neigh_list(struct net_device
*dev
,
6298 struct net_device
*adj_dev
,
6299 struct list_head
*dev_list
)
6301 return (dev_list
== &dev
->adj_list
.upper
||
6302 dev_list
== &dev
->adj_list
.lower
) &&
6303 net_eq(dev_net(dev
), dev_net(adj_dev
));
6306 static int __netdev_adjacent_dev_insert(struct net_device
*dev
,
6307 struct net_device
*adj_dev
,
6308 struct list_head
*dev_list
,
6309 void *private, bool master
)
6311 struct netdev_adjacent
*adj
;
6314 adj
= __netdev_find_adj(adj_dev
, dev_list
);
6318 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6319 dev
->name
, adj_dev
->name
, adj
->ref_nr
);
6324 adj
= kmalloc(sizeof(*adj
), GFP_KERNEL
);
6329 adj
->master
= master
;
6331 adj
->private = private;
6334 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6335 dev
->name
, adj_dev
->name
, adj
->ref_nr
, adj_dev
->name
);
6337 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
)) {
6338 ret
= netdev_adjacent_sysfs_add(dev
, adj_dev
, dev_list
);
6343 /* Ensure that master link is always the first item in list. */
6345 ret
= sysfs_create_link(&(dev
->dev
.kobj
),
6346 &(adj_dev
->dev
.kobj
), "master");
6348 goto remove_symlinks
;
6350 list_add_rcu(&adj
->list
, dev_list
);
6352 list_add_tail_rcu(&adj
->list
, dev_list
);
6358 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
6359 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
6367 static void __netdev_adjacent_dev_remove(struct net_device
*dev
,
6368 struct net_device
*adj_dev
,
6370 struct list_head
*dev_list
)
6372 struct netdev_adjacent
*adj
;
6374 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6375 dev
->name
, adj_dev
->name
, ref_nr
);
6377 adj
= __netdev_find_adj(adj_dev
, dev_list
);
6380 pr_err("Adjacency does not exist for device %s from %s\n",
6381 dev
->name
, adj_dev
->name
);
6386 if (adj
->ref_nr
> ref_nr
) {
6387 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6388 dev
->name
, adj_dev
->name
, ref_nr
,
6389 adj
->ref_nr
- ref_nr
);
6390 adj
->ref_nr
-= ref_nr
;
6395 sysfs_remove_link(&(dev
->dev
.kobj
), "master");
6397 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
6398 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
6400 list_del_rcu(&adj
->list
);
6401 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
6402 adj_dev
->name
, dev
->name
, adj_dev
->name
);
6404 kfree_rcu(adj
, rcu
);
6407 static int __netdev_adjacent_dev_link_lists(struct net_device
*dev
,
6408 struct net_device
*upper_dev
,
6409 struct list_head
*up_list
,
6410 struct list_head
*down_list
,
6411 void *private, bool master
)
6415 ret
= __netdev_adjacent_dev_insert(dev
, upper_dev
, up_list
,
6420 ret
= __netdev_adjacent_dev_insert(upper_dev
, dev
, down_list
,
6423 __netdev_adjacent_dev_remove(dev
, upper_dev
, 1, up_list
);
6430 static void __netdev_adjacent_dev_unlink_lists(struct net_device
*dev
,
6431 struct net_device
*upper_dev
,
6433 struct list_head
*up_list
,
6434 struct list_head
*down_list
)
6436 __netdev_adjacent_dev_remove(dev
, upper_dev
, ref_nr
, up_list
);
6437 __netdev_adjacent_dev_remove(upper_dev
, dev
, ref_nr
, down_list
);
6440 static int __netdev_adjacent_dev_link_neighbour(struct net_device
*dev
,
6441 struct net_device
*upper_dev
,
6442 void *private, bool master
)
6444 return __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
6445 &dev
->adj_list
.upper
,
6446 &upper_dev
->adj_list
.lower
,
6450 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device
*dev
,
6451 struct net_device
*upper_dev
)
6453 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
, 1,
6454 &dev
->adj_list
.upper
,
6455 &upper_dev
->adj_list
.lower
);
6458 static int __netdev_upper_dev_link(struct net_device
*dev
,
6459 struct net_device
*upper_dev
, bool master
,
6460 void *upper_priv
, void *upper_info
,
6461 struct netlink_ext_ack
*extack
)
6463 struct netdev_notifier_changeupper_info changeupper_info
= {
6468 .upper_dev
= upper_dev
,
6471 .upper_info
= upper_info
,
6473 struct net_device
*master_dev
;
6478 if (dev
== upper_dev
)
6481 /* To prevent loops, check if dev is not upper device to upper_dev. */
6482 if (netdev_has_upper_dev(upper_dev
, dev
))
6486 if (netdev_has_upper_dev(dev
, upper_dev
))
6489 master_dev
= netdev_master_upper_dev_get(dev
);
6491 return master_dev
== upper_dev
? -EEXIST
: -EBUSY
;
6494 ret
= call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
6495 &changeupper_info
.info
);
6496 ret
= notifier_to_errno(ret
);
6500 ret
= __netdev_adjacent_dev_link_neighbour(dev
, upper_dev
, upper_priv
,
6505 ret
= call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
6506 &changeupper_info
.info
);
6507 ret
= notifier_to_errno(ret
);
6514 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
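
/* Illustrative sketch (editor's addition, not compiled): how a hypothetical
 * aggregating driver could link a port below itself, under the RTNL lock as
 * required above.  mydrv_attach_port() and the aggregate/port roles are
 * assumptions; netdev_upper_dev_link() is the real API.
 */
#if 0
static int mydrv_attach_port(struct net_device *aggregate,
			     struct net_device *port,
			     struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	/* "port" becomes a lower device of "aggregate" */
	return netdev_upper_dev_link(port, aggregate, extack);
}
#endif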
6540 * netdev_master_upper_dev_link - Add a master link to the upper device
6542 * @upper_dev: new upper device
6543 * @upper_priv: upper device private
6544 * @upper_info: upper info to be passed down via notifier
6545 * @extack: netlink extended ack
6547 * Adds a link to device which is upper to this one. In this case, only
6548 * one master upper device can be linked, although other non-master devices
6549 * might be linked as well. The caller must hold the RTNL lock.
6550 * On a failure a negative errno code is returned. On success the reference
6551 * counts are adjusted and the function returns zero.
6553 int netdev_master_upper_dev_link(struct net_device
*dev
,
6554 struct net_device
*upper_dev
,
6555 void *upper_priv
, void *upper_info
,
6556 struct netlink_ext_ack
*extack
)
6558 return __netdev_upper_dev_link(dev
, upper_dev
, true,
6559 upper_priv
, upper_info
, extack
);
6561 EXPORT_SYMBOL(netdev_master_upper_dev_link
);
6564 * netdev_upper_dev_unlink - Removes a link to upper device
6566 * @upper_dev: new upper device
6568 * Removes a link to device which is upper to this one. The caller must hold
6571 void netdev_upper_dev_unlink(struct net_device
*dev
,
6572 struct net_device
*upper_dev
)
6574 struct netdev_notifier_changeupper_info changeupper_info
= {
6578 .upper_dev
= upper_dev
,
6584 changeupper_info
.master
= netdev_master_upper_dev_get(dev
) == upper_dev
;
6586 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
6587 &changeupper_info
.info
);
6589 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
6591 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
6592 &changeupper_info
.info
);
6594 EXPORT_SYMBOL(netdev_upper_dev_unlink
);
6597 * netdev_bonding_info_change - Dispatch event about slave change
6599 * @bonding_info: info to dispatch
6601 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
6602 * The caller must hold the RTNL lock.
6604 void netdev_bonding_info_change(struct net_device
*dev
,
6605 struct netdev_bonding_info
*bonding_info
)
6607 struct netdev_notifier_bonding_info info
= {
6611 memcpy(&info
.bonding_info
, bonding_info
,
6612 sizeof(struct netdev_bonding_info
));
6613 call_netdevice_notifiers_info(NETDEV_BONDING_INFO
,
6616 EXPORT_SYMBOL(netdev_bonding_info_change
);
6618 static void netdev_adjacent_add_links(struct net_device
*dev
)
6620 struct netdev_adjacent
*iter
;
6622 struct net
*net
= dev_net(dev
);
6624 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
6625 if (!net_eq(net
, dev_net(iter
->dev
)))
6627 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
6628 &iter
->dev
->adj_list
.lower
);
6629 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
6630 &dev
->adj_list
.upper
);
6633 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
6634 if (!net_eq(net
, dev_net(iter
->dev
)))
6636 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
6637 &iter
->dev
->adj_list
.upper
);
6638 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
6639 &dev
->adj_list
.lower
);
6643 static void netdev_adjacent_del_links(struct net_device
*dev
)
6645 struct netdev_adjacent
*iter
;
6647 struct net
*net
= dev_net(dev
);
6649 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
6650 if (!net_eq(net
, dev_net(iter
->dev
)))
6652 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
6653 &iter
->dev
->adj_list
.lower
);
6654 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
6655 &dev
->adj_list
.upper
);
6658 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
6659 if (!net_eq(net
, dev_net(iter
->dev
)))
6661 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
6662 &iter
->dev
->adj_list
.upper
);
6663 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
6664 &dev
->adj_list
.lower
);
6668 void netdev_adjacent_rename_links(struct net_device
*dev
, char *oldname
)
6670 struct netdev_adjacent
*iter
;
6672 struct net
*net
= dev_net(dev
);
6674 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
6675 if (!net_eq(net
, dev_net(iter
->dev
)))
6677 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
6678 &iter
->dev
->adj_list
.lower
);
6679 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
6680 &iter
->dev
->adj_list
.lower
);
6683 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
6684 if (!net_eq(net
, dev_net(iter
->dev
)))
6686 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
6687 &iter
->dev
->adj_list
.upper
);
6688 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
6689 &iter
->dev
->adj_list
.upper
);
6693 void *netdev_lower_dev_get_private(struct net_device
*dev
,
6694 struct net_device
*lower_dev
)
6696 struct netdev_adjacent
*lower
;
6700 lower
= __netdev_find_adj(lower_dev
, &dev
->adj_list
.lower
);
6704 return lower
->private;
6706 EXPORT_SYMBOL(netdev_lower_dev_get_private
);
6709 int dev_get_nest_level(struct net_device
*dev
)
6711 struct net_device
*lower
= NULL
;
6712 struct list_head
*iter
;
6718 netdev_for_each_lower_dev(dev
, lower
, iter
) {
6719 nest
= dev_get_nest_level(lower
);
6720 if (max_nest
< nest
)
6724 return max_nest
+ 1;
6726 EXPORT_SYMBOL(dev_get_nest_level
);
6729 * netdev_lower_change - Dispatch event about lower device state change
6730 * @lower_dev: device
6731 * @lower_state_info: state to dispatch
6733 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
6734 * The caller must hold the RTNL lock.
6736 void netdev_lower_state_changed(struct net_device
*lower_dev
,
6737 void *lower_state_info
)
6739 struct netdev_notifier_changelowerstate_info changelowerstate_info
= {
6740 .info
.dev
= lower_dev
,
6744 changelowerstate_info
.lower_state_info
= lower_state_info
;
6745 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE
,
6746 &changelowerstate_info
.info
);
6748 EXPORT_SYMBOL(netdev_lower_state_changed
);
6750 static void dev_change_rx_flags(struct net_device
*dev
, int flags
)
6752 const struct net_device_ops
*ops
= dev
->netdev_ops
;
6754 if (ops
->ndo_change_rx_flags
)
6755 ops
->ndo_change_rx_flags(dev
, flags
);
6758 static int __dev_set_promiscuity(struct net_device
*dev
, int inc
, bool notify
)
6760 unsigned int old_flags
= dev
->flags
;
6766 dev
->flags
|= IFF_PROMISC
;
6767 dev
->promiscuity
+= inc
;
6768 if (dev
->promiscuity
== 0) {
6771 * If inc causes overflow, untouch promisc and return error.
6774 dev
->flags
&= ~IFF_PROMISC
;
6776 dev
->promiscuity
-= inc
;
6777 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6782 if (dev
->flags
!= old_flags
) {
6783 pr_info("device %s %s promiscuous mode\n",
6785 dev
->flags
& IFF_PROMISC
? "entered" : "left");
6786 if (audit_enabled
) {
6787 current_uid_gid(&uid
, &gid
);
6788 audit_log(audit_context(), GFP_ATOMIC
,
6789 AUDIT_ANOM_PROMISCUOUS
,
6790 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6791 dev
->name
, (dev
->flags
& IFF_PROMISC
),
6792 (old_flags
& IFF_PROMISC
),
6793 from_kuid(&init_user_ns
, audit_get_loginuid(current
)),
6794 from_kuid(&init_user_ns
, uid
),
6795 from_kgid(&init_user_ns
, gid
),
6796 audit_get_sessionid(current
));
6799 dev_change_rx_flags(dev
, IFF_PROMISC
);
6802 __dev_notify_flags(dev
, old_flags
, IFF_PROMISC
);
/**
 * dev_set_promiscuity - update promiscuity count on a device
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;

	err = __dev_set_promiscuity(dev, inc, true);

	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
}
EXPORT_SYMBOL(dev_set_promiscuity);
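
/* Illustrative sketch (editor's addition, not compiled): taking and dropping
 * one promiscuity reference under the RTNL lock, e.g. while a hypothetical
 * in-kernel capture facility is active.  mydrv_*_capture() are invented
 * names; dev_set_promiscuity() is the real API described above.
 */
#if 0
static int mydrv_start_capture(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one reference */
	rtnl_unlock();
	return err;
}

static void mydrv_stop_capture(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}
#endif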
6831 static int __dev_set_allmulti(struct net_device
*dev
, int inc
, bool notify
)
6833 unsigned int old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
6837 dev
->flags
|= IFF_ALLMULTI
;
6838 dev
->allmulti
+= inc
;
6839 if (dev
->allmulti
== 0) {
6842 * If inc causes overflow, untouch allmulti and return error.
6845 dev
->flags
&= ~IFF_ALLMULTI
;
6847 dev
->allmulti
-= inc
;
6848 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6853 if (dev
->flags
^ old_flags
) {
6854 dev_change_rx_flags(dev
, IFF_ALLMULTI
);
6855 dev_set_rx_mode(dev
);
6857 __dev_notify_flags(dev
, old_flags
,
6858 dev
->gflags
^ old_gflags
);
/**
 * dev_set_allmulti - update allmulti count on a device
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
6883 * Upload unicast and multicast address lists to device and
6884 * configure RX filtering. When the device doesn't support unicast
6885 * filtering it is put in promiscuous mode while unicast addresses
6888 void __dev_set_rx_mode(struct net_device
*dev
)
6890 const struct net_device_ops
*ops
= dev
->netdev_ops
;
6892 /* dev_open will call this function so the list will stay sane. */
6893 if (!(dev
->flags
&IFF_UP
))
6896 if (!netif_device_present(dev
))
6899 if (!(dev
->priv_flags
& IFF_UNICAST_FLT
)) {
6900 /* Unicast addresses changes may only happen under the rtnl,
6901 * therefore calling __dev_set_promiscuity here is safe.
6903 if (!netdev_uc_empty(dev
) && !dev
->uc_promisc
) {
6904 __dev_set_promiscuity(dev
, 1, false);
6905 dev
->uc_promisc
= true;
6906 } else if (netdev_uc_empty(dev
) && dev
->uc_promisc
) {
6907 __dev_set_promiscuity(dev
, -1, false);
6908 dev
->uc_promisc
= false;
6912 if (ops
->ndo_set_rx_mode
)
6913 ops
->ndo_set_rx_mode(dev
);
6916 void dev_set_rx_mode(struct net_device
*dev
)
6918 netif_addr_lock_bh(dev
);
6919 __dev_set_rx_mode(dev
);
6920 netif_addr_unlock_bh(dev
);
6924 * dev_get_flags - get flags reported to userspace
6927 * Get the combination of flag bits exported through APIs to userspace.
6929 unsigned int dev_get_flags(const struct net_device
*dev
)
6933 flags
= (dev
->flags
& ~(IFF_PROMISC
|
6938 (dev
->gflags
& (IFF_PROMISC
|
6941 if (netif_running(dev
)) {
6942 if (netif_oper_up(dev
))
6943 flags
|= IFF_RUNNING
;
6944 if (netif_carrier_ok(dev
))
6945 flags
|= IFF_LOWER_UP
;
6946 if (netif_dormant(dev
))
6947 flags
|= IFF_DORMANT
;
6952 EXPORT_SYMBOL(dev_get_flags
);
6954 int __dev_change_flags(struct net_device
*dev
, unsigned int flags
)
6956 unsigned int old_flags
= dev
->flags
;
6962 * Set the flags on our device.
6965 dev
->flags
= (flags
& (IFF_DEBUG
| IFF_NOTRAILERS
| IFF_NOARP
|
6966 IFF_DYNAMIC
| IFF_MULTICAST
| IFF_PORTSEL
|
6968 (dev
->flags
& (IFF_UP
| IFF_VOLATILE
| IFF_PROMISC
|
6972 * Load in the correct multicast list now the flags have changed.
6975 if ((old_flags
^ flags
) & IFF_MULTICAST
)
6976 dev_change_rx_flags(dev
, IFF_MULTICAST
);
6978 dev_set_rx_mode(dev
);
6981 * Have we downed the interface. We handle IFF_UP ourselves
6982 * according to user attempts to set it, rather than blindly
6987 if ((old_flags
^ flags
) & IFF_UP
) {
6988 if (old_flags
& IFF_UP
)
6991 ret
= __dev_open(dev
);
6994 if ((flags
^ dev
->gflags
) & IFF_PROMISC
) {
6995 int inc
= (flags
& IFF_PROMISC
) ? 1 : -1;
6996 unsigned int old_flags
= dev
->flags
;
6998 dev
->gflags
^= IFF_PROMISC
;
7000 if (__dev_set_promiscuity(dev
, inc
, false) >= 0)
7001 if (dev
->flags
!= old_flags
)
7002 dev_set_rx_mode(dev
);
7005 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
7006 * is important. Some (broken) drivers set IFF_PROMISC, when
7007 * IFF_ALLMULTI is requested not asking us and not reporting.
7009 if ((flags
^ dev
->gflags
) & IFF_ALLMULTI
) {
7010 int inc
= (flags
& IFF_ALLMULTI
) ? 1 : -1;
7012 dev
->gflags
^= IFF_ALLMULTI
;
7013 __dev_set_allmulti(dev
, inc
, false);
7019 void __dev_notify_flags(struct net_device
*dev
, unsigned int old_flags
,
7020 unsigned int gchanges
)
7022 unsigned int changes
= dev
->flags
^ old_flags
;
7025 rtmsg_ifinfo(RTM_NEWLINK
, dev
, gchanges
, GFP_ATOMIC
);
7027 if (changes
& IFF_UP
) {
7028 if (dev
->flags
& IFF_UP
)
7029 call_netdevice_notifiers(NETDEV_UP
, dev
);
7031 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
7034 if (dev
->flags
& IFF_UP
&&
7035 (changes
& ~(IFF_UP
| IFF_PROMISC
| IFF_ALLMULTI
| IFF_VOLATILE
))) {
7036 struct netdev_notifier_change_info change_info
= {
7040 .flags_changed
= changes
,
7043 call_netdevice_notifiers_info(NETDEV_CHANGE
, &change_info
.info
);
/**
 * dev_change_flags - change device settings
 * @flags: device state flags
 *
 * Change settings on device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
}
EXPORT_SYMBOL(dev_change_flags);
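
/* Illustrative sketch (editor's addition, not compiled): bringing an
 * interface administratively up from kernel code via dev_change_flags(),
 * which takes flags in the userspace (IFF_*) format and must run under the
 * RTNL lock.  mydrv_force_up() is an invented name.
 */
#if 0
static int mydrv_force_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif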
7070 int __dev_set_mtu(struct net_device
*dev
, int new_mtu
)
7072 const struct net_device_ops
*ops
= dev
->netdev_ops
;
7074 if (ops
->ndo_change_mtu
)
7075 return ops
->ndo_change_mtu(dev
, new_mtu
);
7080 EXPORT_SYMBOL(__dev_set_mtu
);
/**
 * dev_set_mtu - Change maximum transfer unit
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu == dev->mtu)

	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
				    dev->name, new_mtu, dev->min_mtu);

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
				    dev->name, new_mtu, dev->max_mtu);

	if (!netif_device_present(dev))

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);

			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
}
EXPORT_SYMBOL(dev_set_mtu);
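
/* Illustrative sketch (editor's addition, not compiled): requesting a jumbo
 * MTU under the RTNL lock.  dev_set_mtu() above validates the value against
 * dev->min_mtu/dev->max_mtu and notifies NETDEV_PRECHANGEMTU/NETDEV_CHANGEMTU
 * listeners; mydrv_enable_jumbo() is an invented name.
 */
#if 0
static int mydrv_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}
#endif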
7136 * dev_change_tx_queue_len - Change TX queue length of a netdevice
7138 * @new_len: new tx queue length
7140 int dev_change_tx_queue_len(struct net_device
*dev
, unsigned long new_len
)
7142 unsigned int orig_len
= dev
->tx_queue_len
;
7145 if (new_len
!= (unsigned int)new_len
)
7148 if (new_len
!= orig_len
) {
7149 dev
->tx_queue_len
= new_len
;
7150 res
= call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN
, dev
);
7151 res
= notifier_to_errno(res
);
7154 "refused to change device tx_queue_len\n");
7155 dev
->tx_queue_len
= orig_len
;
7158 return dev_qdisc_change_tx_queue_len(dev
);
7165 * dev_set_group - Change group this device belongs to
7167 * @new_group: group this device should belong to
7169 void dev_set_group(struct net_device
*dev
, int new_group
)
7171 dev
->group
= new_group
;
7173 EXPORT_SYMBOL(dev_set_group
);
7176 * dev_set_mac_address - Change Media Access Control Address
7180 * Change the hardware (MAC) address of the device
7182 int dev_set_mac_address(struct net_device
*dev
, struct sockaddr
*sa
)
7184 const struct net_device_ops
*ops
= dev
->netdev_ops
;
7187 if (!ops
->ndo_set_mac_address
)
7189 if (sa
->sa_family
!= dev
->type
)
7191 if (!netif_device_present(dev
))
7193 err
= ops
->ndo_set_mac_address(dev
, sa
);
7196 dev
->addr_assign_type
= NET_ADDR_SET
;
7197 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
7198 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
7201 EXPORT_SYMBOL(dev_set_mac_address
);
/**
 *	dev_change_carrier - Change device carrier
 *	@dev: device
 *	@new_carrier: new value
 *
 *	Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);
/**
 *	dev_get_phys_port_id - Get device physical port ID
 *	@dev: device
 *	@ppid: port ID
 *
 *	Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);
/**
 *	dev_get_phys_port_name - Get device physical port name
 *	@dev: device
 *	@name: port name
 *	@len: limit of bytes to copy to name
 *
 *	Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_name)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_name(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
 *	dev_change_proto_down - update protocol port state information
 *	@dev: device
 *	@proto_down: new value
 *
 *	This info can be used by switch drivers to set the phys state of the
 *	port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);
void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
		     struct netdev_bpf *xdp)
{
	memset(xdp, 0, sizeof(*xdp));
	xdp->command = XDP_QUERY_PROG;

	/* Query must always succeed. */
	WARN_ON(bpf_op(dev, xdp) < 0);
}

static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
{
	struct netdev_bpf xdp;

	__dev_xdp_query(dev, bpf_op, &xdp);

	return xdp.prog_attached;
}
static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
			   struct netlink_ext_ack *extack, u32 flags,
			   struct bpf_prog *prog)
{
	struct netdev_bpf xdp;

	memset(&xdp, 0, sizeof(xdp));
	if (flags & XDP_FLAGS_HW_MODE)
		xdp.command = XDP_SETUP_PROG_HW;
	else
		xdp.command = XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	return bpf_op(dev, &xdp);
}
static void dev_xdp_uninstall(struct net_device *dev)
{
	struct netdev_bpf xdp;
	bpf_op_t ndo_bpf;

	/* Remove generic XDP */
	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));

	/* Remove from the driver */
	ndo_bpf = dev->netdev_ops->ndo_bpf;
	if (!ndo_bpf)
		return;

	__dev_xdp_query(dev, ndo_bpf, &xdp);
	if (xdp.prog_attached == XDP_ATTACHED_NONE)
		return;

	/* Program removal should always succeed */
	WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
}
/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct bpf_prog *prog = NULL;
	bpf_op_t bpf_op, bpf_chk;
	int err;

	ASSERT_RTNL();

	bpf_op = bpf_chk = ops->ndo_bpf;
	if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
		return -EOPNOTSUPP;
	if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
		bpf_op = generic_xdp_install;
	if (bpf_op == bpf_chk)
		bpf_chk = generic_xdp_install;

	if (fd >= 0) {
		if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
			return -EEXIST;
		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
		    __dev_xdp_attached(dev, bpf_op))
			return -EBUSY;

		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
					     bpf_op == ops->ndo_bpf);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (!(flags & XDP_FLAGS_HW_MODE) &&
		    bpf_prog_is_dev_bound(prog->aux)) {
			NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
			bpf_prog_put(prog);
			return -EINVAL;
		}
	}

	err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
	if (err < 0 && prog)
		bpf_prog_put(prog);

	return err;
}
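
/*
 * Illustrative sketch (not part of the original file): this is the helper the
 * rtnetlink IFLA_XDP handling lands in.  A caller already holding RTNL could
 * attach a program in native driver mode like this; "example_netdev" and
 * "prog_fd" are assumed inputs.
 */
static int __maybe_unused example_attach_xdp(struct net_device *example_netdev,
					     int prog_fd)
{
	ASSERT_RTNL();

	return dev_change_xdp_fd(example_netdev, NULL, prog_fd,
				 XDP_FLAGS_DRV_MODE);
}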
/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}
/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		dev_xdp_uninstall(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}
static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}
static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
		}
	}
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
					!(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
					 !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware.  If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	return features;
}
int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
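
/*
 * Illustrative sketch (not part of the original file): a driver that learns
 * at runtime that its hardware can no longer segment might clear the
 * capability bits and ask the core to recompute the active feature set.
 * "example_netdev" is an assumed net_device; RTNL must be held.
 */
static void __maybe_unused example_drop_tso(struct net_device *example_netdev)
{
	ASSERT_RTNL();

	example_netdev->hw_features &= ~NETIF_F_ALL_TSO;
	netdev_update_features(example_netdev);
}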
/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);
	int err = 0;

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++) {
		rx[i].dev = dev;

		/* XDP RX-queue setup */
		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
		if (err < 0)
			goto err_rxq_info;
	}
	return 0;

err_rxq_info:
	/* Rollback successful reg's and free other resources */
	while (i--)
		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
	kvfree(dev->_rx);
	dev->_rx = NULL;
	return err;
}
static void netif_free_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;

	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
	if (!dev->_rx)
		return;

	for (i = 0; i < count; i++)
		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

	kvfree(dev->_rx);
}
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}
static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}
static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}
void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
		     NETDEV_FEATURE_COUNT);
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->netdev_ops->ndo_udp_tunnel_add) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initialize the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We dont allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' dont need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	if (rtnl_lock_killable())
		return -EINTR;
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
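
/*
 * Illustrative sketch (not part of the original file): the usual probe-time
 * pattern in an Ethernet driver.  The private area size and the driver's own
 * ndo setup are elided; "example_probe" is a hypothetical helper.
 */
static struct net_device * __maybe_unused example_probe(void)
{
	struct net_device *dev;

	dev = alloc_etherdev(0);	/* no private area in this sketch */
	if (!dev)
		return NULL;

	/* ... a real driver would set dev->netdev_ops, MAC address, etc. ... */

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}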
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
#if IS_ENABLED(CONFIG_DECNET)
		WARN_ON(dev->dn_ptr);
#endif
		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + sizeof(*netdev_stats), 0,
	       sizeof(*stats64) - sizeof(*netdev_stats));
#else
	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
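
/*
 * Illustrative sketch (not part of the original file): a driver that only
 * maintains the legacy dev->stats counters can implement .ndo_get_stats64 by
 * converting them with this helper, exactly as dev_get_stats() does for it
 * by default when no ndo is provided.
 */
static void __maybe_unused example_get_stats64(struct net_device *dev,
					       struct rtnl_link_stats64 *storage)
{
	netdev_stats_to_stats64(storage, &dev->stats);
}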
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
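
/*
 * Illustrative sketch (not part of the original file): any in-kernel reader
 * of interface statistics goes through dev_get_stats() with an on-stack
 * rtnl_link_stats64; "example_netdev" is an assumed net_device.
 */
static void __maybe_unused example_dump_stats(struct net_device *example_netdev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(example_netdev, &stats);
	netdev_info(example_netdev, "rx %llu tx %llu packets\n",
		    stats.rx_packets, stats.tx_packets);
}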
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}
static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	unsigned int alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
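
/*
 * Illustrative sketch (not part of the original file): a multi-queue driver
 * allocating a device with 8 TX and 8 RX queues and a hypothetical private
 * struct.  "struct example_priv", "example_setup" and the "ex%d" name format
 * are assumptions for the sketch, not real symbols.
 */
struct example_priv {
	int placeholder;
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);	/* fill in Ethernet defaults */
}

static struct net_device * __maybe_unused example_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
				NET_NAME_ENUM, example_setup, 8, 8);
}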
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();
	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/*  Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *  Note: As most callers use a stack allocated list_head,
 *  we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
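
/*
 * Illustrative sketch (not part of the original file): tearing several
 * devices down in one RTNL critical section so notifiers and the todo list
 * are batched.  "example_devs" and "n" are assumed inputs.
 */
static void __maybe_unused example_unregister_batch(struct net_device **example_devs,
						    unsigned int n)
{
	LIST_HEAD(kill_list);
	unsigned int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(example_devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}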
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err, new_nsid, new_ifindex;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing todo */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net);
	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		new_ifindex = dev_new_index(net);
	else
		new_ifindex = dev->ifindex;

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
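
/*
 * Illustrative sketch (not part of the original file): an aggregating driver
 * (bonding/team style) folds every slave's feature set into one, which is
 * the pattern this helper exists for.  "example_slaves" and "n" are assumed
 * inputs and the mask here is deliberately permissive for the sketch; a real
 * driver would pass its own feature mask.
 */
static netdev_features_t __maybe_unused
example_master_features(struct net_device **example_slaves, unsigned int n)
{
	netdev_features_t all = NETIF_F_ALL_FOR_ALL;
	unsigned int i;

	for (i = 0; i < n; i++)
		all = netdev_increment_features(all,
						example_slaves[i]->features,
						NETIF_F_ALL_FOR_ALL);
	return all;
}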
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
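
/*
 * Illustrative sketch (not part of the original file): drivers normally call
 * the wrappers generated above rather than netdev_printk() directly; the
 * message is prefixed with the driver, bus and interface name built in
 * __netdev_printk().  "example_netdev" and "example_flaps" are assumed.
 */
static void __maybe_unused example_log_state(struct net_device *example_netdev,
					     int example_flaps)
{
	netdev_warn(example_netdev, "link flapped %d times\n", example_flaps);
}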
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}
static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices.  Ensuring the loopback device
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);