/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

#define MAX_NEST_DEV 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
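/*
 * Illustrative sketch (not part of the original file): a pure reader can walk
 * the device list under RCU instead of taking dev_base_lock, e.g.
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		pr_info("saw %s\n", dev->name);
 *	rcu_read_unlock();
 *
 * Writers still take the rtnl semaphore plus dev_base_lock as described above.
 */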
static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
	spin_lock(&sd->input_pkt_queue.lock);
}

static inline void rps_unlock(struct softnet_data *sd)
{
	spin_unlock(&sd->input_pkt_queue.lock);
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
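/*
 * Illustrative sketch (not from this file): a module registering a tap for
 * every packet, assuming a hypothetical handler my_rcv():
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 *	...
 *	dev_remove_pack(&my_ptype);	// sleeps until no CPU still uses it
 */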
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
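/*
 * Illustrative sketch (not from this file): registering GRO callbacks for a
 * made-up protocol, assuming hypothetical my_gro_receive()/my_gro_complete():
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = htons(0x88B5),		// local experimental EtherType
 *		.priority = 10,
 *		.callbacks = {
 *			.gro_receive  = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 */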
/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
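/*
 * Illustrative example (not from this file): the "netdev=" boot parameter
 * parsed above takes up to four integers followed by a name, e.g.
 *
 *	netdev=5,0x240,0,0,eth0
 *
 * which would record irq=5 and base_addr=0x240 for eth0 in dev_boot_setup,
 * to be picked up later by netdev_boot_setup_check().
 */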
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
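/*
 * Illustrative sketch (not from this file): looking up a device by name from
 * process context and releasing the reference when done:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */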
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
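/*
 * Illustrative sketch (not from this file): resolving a busy-poll NAPI id
 * (for example one reported via SO_INCOMING_NAPI_ID) to its device:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_napi_id(napi_id);
 *	if (dev)
 *		pr_info("napi id %u belongs to %s\n", napi_id, dev->name);
 *	rcu_read_unlock();
 */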
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
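/*
 * Illustrative sketch (not from this file): a driver asking for the next free
 * "mydev%d" style name before registration:
 *
 *	err = dev_alloc_name(dev, "mydev%d");
 *	if (err < 0)
 *		goto out_free;
 *	// dev->name is now e.g. "mydev0"; err holds the unit number
 */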
int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret) {
		clear_bit(__LINK_STATE_START, &dev->state);
	} else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
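/*
 * Illustrative sketch (not from this file): bringing an interface up from
 * kernel code; the RTNL lock must be held around the call:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *	if (err)
 *		pr_warn("could not open %s: %d\n", dev->name, err);
 */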
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}
void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
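/*
 * Illustrative sketch (not from this file): a module watching for interfaces
 * coming up, assuming a hypothetical callback my_netdev_event():
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_netdev_event };
 *
 *	register_netdevice_notifier(&my_nb);
 */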
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return call_netdevice_notifiers_info(val, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
/**
 *	call_netdevice_notifiers_mtu - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@arg: additional u32 argument passed to the notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
{
	struct netdev_notifier_info_ext info = {
		.info.dev = dev,
		.ext.mtu = arg,
	};

	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

	return call_netdevice_notifiers_info(val, &info.info);
}
#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_key_enable(&netstamp_needed);
	else
		static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_dec(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
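/*
 * Illustrative example (not from this file): for a device with an MTU of 1500
 * and a 14 byte Ethernet header, the check above allows frames up to
 * 1500 + 14 + 4 (VLAN_HLEN) = 1518 bytes; anything longer is only forwardable
 * if it is GSO and can still be segmented later.
 */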
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
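/*
 * Illustrative sketch (not from this file): a veth-style driver handing a
 * transmitted skb to its peer's receive path from ndo_start_xmit():
 *
 *	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
 *		dev->stats.tx_packets++;	// hypothetical accounting
 *	return NETDEV_TX_OK;
 */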
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu)
		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
					       offset, count);

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = offset + (count - 1); count--; i--)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, cpu, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_cpu_and(cpu, cpu_online_mask, mask) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = cpu * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* We need to explicitly update tci as the previous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = cpu * num_tc + tc;

		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ?
				     numa_node_id : NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->cpu_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
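
/* Illustrative usage sketch (not part of dev.c): a multiqueue driver would
 * typically call netif_set_xps_queue() once per TX queue, handing it the
 * cpumask that should transmit on that queue.  The helper below and its
 * queue-to-CPU policy are hypothetical; only the exported call is real.
 *
 *	static void example_setup_xps(struct net_device *dev)
 *	{
 *		int i;
 *
 *		for (i = 0; i < dev->real_num_tx_queues; i++)
 *			if (netif_set_xps_queue(dev,
 *						cpumask_of(i % num_online_cpus()),
 *						i))
 *				netdev_warn(dev, "XPS setup failed for queue %d\n",
 *					    i);
 *	}
 */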
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
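
/* Illustrative sketch (not part of dev.c): carving four TX queues into two
 * traffic classes with the helpers above.  TC 0 gets queues 0-1, TC 1 gets
 * queues 2-3, and priorities 0 and 1 are mapped onto them.  The layout is an
 * arbitrary example and error handling is omitted.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 2, 0);
 *	netdev_set_tc_queue(dev, 1, 2, 2);
 *	netdev_set_prio_tc_map(dev, 0, 0);
 *	netdev_set_prio_tc_map(dev, 1, 1);
 */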
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
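
/* Illustrative sketch (not part of dev.c): a driver that allocated
 * dev->num_rx_queues queues at probe time but only enables "nr" of them
 * would call, under rtnl_lock or before register_netdev():
 *
 *	err = netif_set_real_num_rx_queues(dev, nr);
 *	if (!err)
 *		err = netif_set_real_num_tx_queues(dev, nr);
 *
 * "nr" and the pairing with the TX variant are the driver's choice, not
 * something this file requires.
 */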
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
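
/* Illustrative sketch (not part of dev.c): the usual pairing of the two
 * helpers above in a driver's power-management callbacks.  The callback
 * names are hypothetical; only netif_device_detach()/netif_device_attach()
 * come from this file.
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */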
/*
 * Returns a Tx hash based on the given packet descriptor and the number of
 * Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
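
/* Worked example (not part of dev.c) of the mapping above, using
 * reciprocal_scale(val, n) == ((u64)val * n) >> 32: with a traffic class
 * whose tc_to_txq entry is { .offset = 4, .count = 4 } and skb_get_hash()
 * returning 0x80000000, reciprocal_scale() yields 2, so the packet lands on
 * TX queue 4 + 2 = 6, i.e. always inside that class's four-queue range.
 */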
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
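
/* Illustrative sketch (not part of dev.c): a driver whose hardware cannot
 * checksum a given frame typically falls back to the helper above from its
 * ndo_start_xmit() before handing the frame to the NIC.  The capability
 * check and the drop label are hypothetical driver details.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */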
int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb->data;
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
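
/* Illustrative sketch (not part of dev.c): how a caller consumes the result
 * of segmentation (compare validate_xmit_skb() later in this file).
 * skb_gso_segment() is the inline wrapper that calls __skb_gso_segment()
 * with tx_path = true; the drop label stands in for the caller's error path.
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}
 */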
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
2867 /* Actually, we should eliminate this check as soon as we know, that:
2868 * 1. IOMMU is present and allows to map all the memory.
2869 * 2. No high memory really exists on this machine.
2872 static int illegal_highdma(struct net_device
*dev
, struct sk_buff
*skb
)
2874 #ifdef CONFIG_HIGHMEM
2877 if (!(dev
->features
& NETIF_F_HIGHDMA
)) {
2878 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2879 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2881 if (PageHighMem(skb_frag_page(frag
)))
2886 if (PCI_DMA_BUS_IS_PHYS
) {
2887 struct device
*pdev
= dev
->dev
.parent
;
2891 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2892 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2893 dma_addr_t addr
= page_to_phys(skb_frag_page(frag
));
2895 if (!pdev
->dma_mask
|| addr
+ PAGE_SIZE
- 1 > *pdev
->dma_mask
)
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_tx_queue_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		if (validate_xmit_xfrm(skb, features))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb wont be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3167 static void qdisc_pkt_len_init(struct sk_buff
*skb
)
3169 const struct skb_shared_info
*shinfo
= skb_shinfo(skb
);
3171 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
3173 /* To get more precise estimation of bytes sent on wire,
3174 * we add to pkt_len the headers size of all segments
3176 if (shinfo
->gso_size
) {
3177 unsigned int hdr_len
;
3178 u16 gso_segs
= shinfo
->gso_segs
;
3180 /* mac layer + network layer */
3181 hdr_len
= skb_transport_header(skb
) - skb_mac_header(skb
);
3183 /* + transport layer */
3184 if (likely(shinfo
->gso_type
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
))) {
3185 const struct tcphdr
*th
;
3186 struct tcphdr _tcphdr
;
3188 th
= skb_header_pointer(skb
, skb_transport_offset(skb
),
3189 sizeof(_tcphdr
), &_tcphdr
);
3191 hdr_len
+= __tcp_hdrlen(th
);
3193 struct udphdr _udphdr
;
3195 if (skb_header_pointer(skb
, skb_transport_offset(skb
),
3196 sizeof(_udphdr
), &_udphdr
))
3197 hdr_len
+= sizeof(struct udphdr
);
3200 if (shinfo
->gso_type
& SKB_GSO_DODGY
)
3201 gso_segs
= DIV_ROUND_UP(skb
->len
- hdr_len
,
3204 qdisc_skb_cb(skb
)->pkt_len
+= (gso_segs
- 1) * hdr_len
;
3208 static inline int __dev_xmit_skb(struct sk_buff
*skb
, struct Qdisc
*q
,
3209 struct net_device
*dev
,
3210 struct netdev_queue
*txq
)
3212 spinlock_t
*root_lock
= qdisc_lock(q
);
3213 struct sk_buff
*to_free
= NULL
;
3217 qdisc_calculate_pkt_len(skb
, q
);
3219 * Heuristic to force contended enqueues to serialize on a
3220 * separate lock before trying to get qdisc main lock.
3221 * This permits qdisc->running owner to get the lock more
3222 * often and dequeue packets faster.
3224 contended
= qdisc_is_running(q
);
3225 if (unlikely(contended
))
3226 spin_lock(&q
->busylock
);
3228 spin_lock(root_lock
);
3229 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
, &q
->state
))) {
3230 __qdisc_drop(skb
, &to_free
);
3232 } else if ((q
->flags
& TCQ_F_CAN_BYPASS
) && !qdisc_qlen(q
) &&
3233 qdisc_run_begin(q
)) {
3235 * This is a work-conserving queue; there are no old skbs
3236 * waiting to be sent out; and the qdisc is not running -
3237 * xmit the skb directly.
3240 qdisc_bstats_update(q
, skb
);
3242 if (sch_direct_xmit(skb
, q
, dev
, txq
, root_lock
, true)) {
3243 if (unlikely(contended
)) {
3244 spin_unlock(&q
->busylock
);
3251 rc
= NET_XMIT_SUCCESS
;
3253 rc
= q
->enqueue(skb
, q
, &to_free
) & NET_XMIT_MASK
;
3254 if (qdisc_run_begin(q
)) {
3255 if (unlikely(contended
)) {
3256 spin_unlock(&q
->busylock
);
3262 spin_unlock(root_lock
);
3263 if (unlikely(to_free
))
3264 kfree_skb_list(to_free
);
3265 if (unlikely(contended
))
3266 spin_unlock(&q
->busylock
);
3270 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3271 static void skb_update_prio(struct sk_buff
*skb
)
3273 const struct netprio_map
*map
;
3274 const struct sock
*sk
;
3275 unsigned int prioidx
;
3279 map
= rcu_dereference_bh(skb
->dev
->priomap
);
3282 sk
= skb_to_full_sk(skb
);
3286 prioidx
= sock_cgroup_prioidx(&sk
->sk_cgrp_data
);
3288 if (prioidx
< map
->priomap_len
)
3289 skb
->priority
= map
->priomap
[prioidx
];
3292 #define skb_update_prio(skb)
3295 DEFINE_PER_CPU(int, xmit_recursion
);
3296 EXPORT_SYMBOL(xmit_recursion
);
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
3317 #ifdef CONFIG_NET_EGRESS
3318 static struct sk_buff
*
3319 sch_handle_egress(struct sk_buff
*skb
, int *ret
, struct net_device
*dev
)
3321 struct mini_Qdisc
*miniq
= rcu_dereference_bh(dev
->miniq_egress
);
3322 struct tcf_result cl_res
;
3327 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3328 mini_qdisc_bstats_cpu_update(miniq
, skb
);
3330 switch (tcf_classify(skb
, miniq
->filter_list
, &cl_res
, false)) {
3332 case TC_ACT_RECLASSIFY
:
3333 skb
->tc_index
= TC_H_MIN(cl_res
.classid
);
3336 mini_qdisc_qstats_cpu_drop(miniq
);
3337 *ret
= NET_XMIT_DROP
;
3343 *ret
= NET_XMIT_SUCCESS
;
3346 case TC_ACT_REDIRECT
:
3347 /* No need to push/pop skb's mac_header here on egress! */
3348 skb_do_redirect(skb
);
3349 *ret
= NET_XMIT_SUCCESS
;
3357 #endif /* CONFIG_NET_EGRESS */
3359 static inline int get_xps_queue(struct net_device
*dev
, struct sk_buff
*skb
)
3362 struct xps_dev_maps
*dev_maps
;
3363 struct xps_map
*map
;
3364 int queue_index
= -1;
3367 dev_maps
= rcu_dereference(dev
->xps_maps
);
3369 unsigned int tci
= skb
->sender_cpu
- 1;
3373 tci
+= netdev_get_prio_tc_map(dev
, skb
->priority
);
3376 map
= rcu_dereference(dev_maps
->cpu_map
[tci
]);
3379 queue_index
= map
->queues
[0];
3381 queue_index
= map
->queues
[reciprocal_scale(skb_get_hash(skb
),
3383 if (unlikely(queue_index
>= dev
->real_num_tx_queues
))
3395 static u16
__netdev_pick_tx(struct net_device
*dev
, struct sk_buff
*skb
)
3397 struct sock
*sk
= skb
->sk
;
3398 int queue_index
= sk_tx_queue_get(sk
);
3400 if (queue_index
< 0 || skb
->ooo_okay
||
3401 queue_index
>= dev
->real_num_tx_queues
) {
3402 int new_index
= get_xps_queue(dev
, skb
);
3405 new_index
= skb_tx_hash(dev
, skb
);
3407 if (queue_index
!= new_index
&& sk
&&
3409 rcu_access_pointer(sk
->sk_dst_cache
))
3410 sk_tx_queue_set(sk
, new_index
);
3412 queue_index
= new_index
;
3418 struct netdev_queue
*netdev_pick_tx(struct net_device
*dev
,
3419 struct sk_buff
*skb
,
3422 int queue_index
= 0;
3425 u32 sender_cpu
= skb
->sender_cpu
- 1;
3427 if (sender_cpu
>= (u32
)NR_CPUS
)
3428 skb
->sender_cpu
= raw_smp_processor_id() + 1;
3431 if (dev
->real_num_tx_queues
!= 1) {
3432 const struct net_device_ops
*ops
= dev
->netdev_ops
;
3434 if (ops
->ndo_select_queue
)
3435 queue_index
= ops
->ndo_select_queue(dev
, skb
, accel_priv
,
3438 queue_index
= __netdev_pick_tx(dev
, skb
);
3441 queue_index
= netdev_cap_txqueue(dev
, queue_index
);
3444 skb_set_queue_mapping(skb
, queue_index
);
3445 return netdev_get_tx_queue(dev
, queue_index
);
3449 * __dev_queue_xmit - transmit a buffer
3450 * @skb: buffer to transmit
3451 * @accel_priv: private data used for L2 forwarding offload
3453 * Queue a buffer for transmission to a network device. The caller must
3454 * have set the device and priority and built the buffer before calling
3455 * this function. The function can be called from an interrupt.
3457 * A negative errno code is returned on a failure. A success does not
3458 * guarantee the frame will be transmitted as it may be dropped due
3459 * to congestion or traffic shaping.
3461 * -----------------------------------------------------------------------------------
3462 * I notice this method can also return errors from the queue disciplines,
3463 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3466 * Regardless of the return value, the skb is consumed, so it is currently
3467 * difficult to retry a send to this method. (You can bump the ref count
3468 * before sending to hold a reference for retry if you are careful.)
3470 * When calling this method, interrupts MUST be enabled. This is because
3471 * the BH enable code must have IRQs enabled so that it will not deadlock.
3474 static int __dev_queue_xmit(struct sk_buff
*skb
, void *accel_priv
)
3476 struct net_device
*dev
= skb
->dev
;
3477 struct netdev_queue
*txq
;
3481 skb_reset_mac_header(skb
);
3483 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_SCHED_TSTAMP
))
3484 __skb_tstamp_tx(skb
, NULL
, skb
->sk
, SCM_TSTAMP_SCHED
);
3486 /* Disable soft irqs for various locks below. Also
3487 * stops preemption for RCU.
3491 skb_update_prio(skb
);
3493 qdisc_pkt_len_init(skb
);
3494 #ifdef CONFIG_NET_CLS_ACT
3495 skb
->tc_at_ingress
= 0;
3496 # ifdef CONFIG_NET_EGRESS
3497 if (static_key_false(&egress_needed
)) {
3498 skb
= sch_handle_egress(skb
, &rc
, dev
);
3504 /* If device/qdisc don't need skb->dst, release it right now while
3505 * its hot in this cpu cache.
3507 if (dev
->priv_flags
& IFF_XMIT_DST_RELEASE
)
3512 txq
= netdev_pick_tx(dev
, skb
, accel_priv
);
3513 q
= rcu_dereference_bh(txq
->qdisc
);
3515 trace_net_dev_queue(skb
);
3517 rc
= __dev_xmit_skb(skb
, q
, dev
, txq
);
3521 /* The device has no queue. Common case for software devices:
3522 * loopback, all the sorts of tunnels...
3524 * Really, it is unlikely that netif_tx_lock protection is necessary
3525 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
3527 * However, it is possible, that they rely on protection
3530 * Check this and shot the lock. It is not prone from deadlocks.
3531 *Either shot noqueue qdisc, it is even simpler 8)
3533 if (dev
->flags
& IFF_UP
) {
3534 int cpu
= smp_processor_id(); /* ok because BHs are off */
3536 if (txq
->xmit_lock_owner
!= cpu
) {
3537 if (unlikely(__this_cpu_read(xmit_recursion
) >
3538 XMIT_RECURSION_LIMIT
))
3539 goto recursion_alert
;
3541 skb
= validate_xmit_skb(skb
, dev
);
3545 HARD_TX_LOCK(dev
, txq
, cpu
);
3547 if (!netif_xmit_stopped(txq
)) {
3548 __this_cpu_inc(xmit_recursion
);
3549 skb
= dev_hard_start_xmit(skb
, dev
, txq
, &rc
);
3550 __this_cpu_dec(xmit_recursion
);
3551 if (dev_xmit_complete(rc
)) {
3552 HARD_TX_UNLOCK(dev
, txq
);
3556 HARD_TX_UNLOCK(dev
, txq
);
3557 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3560 /* Recursion is detected! It is possible,
3564 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3570 rcu_read_unlock_bh();
3572 atomic_long_inc(&dev
->tx_dropped
);
3573 kfree_skb_list(skb
);
3576 rcu_read_unlock_bh();
3580 int dev_queue_xmit(struct sk_buff
*skb
)
3582 return __dev_queue_xmit(skb
, NULL
);
3584 EXPORT_SYMBOL(dev_queue_xmit
);
3586 int dev_queue_xmit_accel(struct sk_buff
*skb
, void *accel_priv
)
3588 return __dev_queue_xmit(skb
, accel_priv
);
3590 EXPORT_SYMBOL(dev_queue_xmit_accel
);
3593 /*************************************************************************
3595 *************************************************************************/
3597 int netdev_max_backlog __read_mostly
= 1000;
3598 EXPORT_SYMBOL(netdev_max_backlog
);
3600 int netdev_tstamp_prequeue __read_mostly
= 1;
3601 int netdev_budget __read_mostly
= 300;
3602 unsigned int __read_mostly netdev_budget_usecs
= 2000;
3603 int weight_p __read_mostly
= 64; /* old backlog weight */
3604 int dev_weight_rx_bias __read_mostly
= 1; /* bias for backlog weight */
3605 int dev_weight_tx_bias __read_mostly
= 1; /* bias for output_queue quota */
3606 int dev_rx_weight __read_mostly
= 64;
3607 int dev_tx_weight __read_mostly
= 64;
3609 /* Called with irq disabled */
3610 static inline void ____napi_schedule(struct softnet_data
*sd
,
3611 struct napi_struct
*napi
)
3613 list_add_tail(&napi
->poll_list
, &sd
->poll_list
);
3614 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
3619 /* One global table that all flow-based protocols share. */
3620 struct rps_sock_flow_table __rcu
*rps_sock_flow_table __read_mostly
;
3621 EXPORT_SYMBOL(rps_sock_flow_table
);
3622 u32 rps_cpu_mask __read_mostly
;
3623 EXPORT_SYMBOL(rps_cpu_mask
);
3625 struct static_key rps_needed __read_mostly
;
3626 EXPORT_SYMBOL(rps_needed
);
3627 struct static_key rfs_needed __read_mostly
;
3628 EXPORT_SYMBOL(rfs_needed
);
3630 static struct rps_dev_flow
*
3631 set_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
3632 struct rps_dev_flow
*rflow
, u16 next_cpu
)
3634 if (next_cpu
< nr_cpu_ids
) {
3635 #ifdef CONFIG_RFS_ACCEL
3636 struct netdev_rx_queue
*rxqueue
;
3637 struct rps_dev_flow_table
*flow_table
;
3638 struct rps_dev_flow
*old_rflow
;
3643 /* Should we steer this flow to a different hardware queue? */
3644 if (!skb_rx_queue_recorded(skb
) || !dev
->rx_cpu_rmap
||
3645 !(dev
->features
& NETIF_F_NTUPLE
))
3647 rxq_index
= cpu_rmap_lookup_index(dev
->rx_cpu_rmap
, next_cpu
);
3648 if (rxq_index
== skb_get_rx_queue(skb
))
3651 rxqueue
= dev
->_rx
+ rxq_index
;
3652 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
3655 flow_id
= skb_get_hash(skb
) & flow_table
->mask
;
3656 rc
= dev
->netdev_ops
->ndo_rx_flow_steer(dev
, skb
,
3657 rxq_index
, flow_id
);
3661 rflow
= &flow_table
->flows
[flow_id
];
3663 if (old_rflow
->filter
== rflow
->filter
)
3664 old_rflow
->filter
= RPS_NO_FILTER
;
3668 per_cpu(softnet_data
, next_cpu
).input_queue_head
;
3671 rflow
->cpu
= next_cpu
;
3676 * get_rps_cpu is called from netif_receive_skb and returns the target
3677 * CPU from the RPS map of the receiving queue for a given skb.
3678 * rcu_read_lock must be held on entry.
3680 static int get_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
3681 struct rps_dev_flow
**rflowp
)
3683 const struct rps_sock_flow_table
*sock_flow_table
;
3684 struct netdev_rx_queue
*rxqueue
= dev
->_rx
;
3685 struct rps_dev_flow_table
*flow_table
;
3686 struct rps_map
*map
;
3691 if (skb_rx_queue_recorded(skb
)) {
3692 u16 index
= skb_get_rx_queue(skb
);
3694 if (unlikely(index
>= dev
->real_num_rx_queues
)) {
3695 WARN_ONCE(dev
->real_num_rx_queues
> 1,
3696 "%s received packet on queue %u, but number "
3697 "of RX queues is %u\n",
3698 dev
->name
, index
, dev
->real_num_rx_queues
);
3704 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3706 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
3707 map
= rcu_dereference(rxqueue
->rps_map
);
3708 if (!flow_table
&& !map
)
3711 skb_reset_network_header(skb
);
3712 hash
= skb_get_hash(skb
);
3716 sock_flow_table
= rcu_dereference(rps_sock_flow_table
);
3717 if (flow_table
&& sock_flow_table
) {
3718 struct rps_dev_flow
*rflow
;
3722 /* First check into global flow table if there is a match */
3723 ident
= sock_flow_table
->ents
[hash
& sock_flow_table
->mask
];
3724 if ((ident
^ hash
) & ~rps_cpu_mask
)
3727 next_cpu
= ident
& rps_cpu_mask
;
3729 /* OK, now we know there is a match,
3730 * we can look at the local (per receive queue) flow table
3732 rflow
= &flow_table
->flows
[hash
& flow_table
->mask
];
3736 * If the desired CPU (where last recvmsg was done) is
3737 * different from current CPU (one in the rx-queue flow
3738 * table entry), switch if one of the following holds:
3739 * - Current CPU is unset (>= nr_cpu_ids).
3740 * - Current CPU is offline.
3741 * - The current CPU's queue tail has advanced beyond the
3742 * last packet that was enqueued using this table entry.
3743 * This guarantees that all previous packets for the flow
3744 * have been dequeued, thus preserving in order delivery.
3746 if (unlikely(tcpu
!= next_cpu
) &&
3747 (tcpu
>= nr_cpu_ids
|| !cpu_online(tcpu
) ||
3748 ((int)(per_cpu(softnet_data
, tcpu
).input_queue_head
-
3749 rflow
->last_qtail
)) >= 0)) {
3751 rflow
= set_rps_cpu(dev
, skb
, rflow
, next_cpu
);
3754 if (tcpu
< nr_cpu_ids
&& cpu_online(tcpu
)) {
3764 tcpu
= map
->cpus
[reciprocal_scale(hash
, map
->len
)];
3765 if (cpu_online(tcpu
)) {
3775 #ifdef CONFIG_RFS_ACCEL
/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
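
/* Illustrative sketch (not part of dev.c): an aRFS-capable driver walking its
 * installed filters from a periodic job and using rps_may_expire_flow() to
 * decide which ones to remove.  The filter table and the removal helper are
 * hypothetical driver internals.
 *
 *	for (i = 0; i < n_filters; i++) {
 *		struct example_filter *f = &tab[i];
 *
 *		if (f->installed &&
 *		    rps_may_expire_flow(dev, f->rxq_index, f->flow_id, i))
 *			example_remove_hw_filter(f);
 *	}
 */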
3813 #endif /* CONFIG_RFS_ACCEL */
3815 /* Called from hardirq (IPI) context */
3816 static void rps_trigger_softirq(void *data
)
3818 struct softnet_data
*sd
= data
;
3820 ____napi_schedule(sd
, &sd
->backlog
);
3824 #endif /* CONFIG_RPS */
3827 * Check if this softnet_data structure is another cpu one
3828 * If yes, queue it to our IPI list and return 1
3831 static int rps_ipi_queued(struct softnet_data
*sd
)
3834 struct softnet_data
*mysd
= this_cpu_ptr(&softnet_data
);
3837 sd
->rps_ipi_next
= mysd
->rps_ipi_list
;
3838 mysd
->rps_ipi_list
= sd
;
3840 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
3843 #endif /* CONFIG_RPS */
3847 #ifdef CONFIG_NET_FLOW_LIMIT
3848 int netdev_flow_limit_table_len __read_mostly
= (1 << 12);
3851 static bool skb_flow_limit(struct sk_buff
*skb
, unsigned int qlen
)
3853 #ifdef CONFIG_NET_FLOW_LIMIT
3854 struct sd_flow_limit
*fl
;
3855 struct softnet_data
*sd
;
3856 unsigned int old_flow
, new_flow
;
3858 if (qlen
< (netdev_max_backlog
>> 1))
3861 sd
= this_cpu_ptr(&softnet_data
);
3864 fl
= rcu_dereference(sd
->flow_limit
);
3866 new_flow
= skb_get_hash(skb
) & (fl
->num_buckets
- 1);
3867 old_flow
= fl
->history
[fl
->history_head
];
3868 fl
->history
[fl
->history_head
] = new_flow
;
3871 fl
->history_head
&= FLOW_LIMIT_HISTORY
- 1;
3873 if (likely(fl
->buckets
[old_flow
]))
3874 fl
->buckets
[old_flow
]--;
3876 if (++fl
->buckets
[new_flow
] > (FLOW_LIMIT_HISTORY
>> 1)) {
3888 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3889 * queue (may be a remote CPU queue).
3891 static int enqueue_to_backlog(struct sk_buff
*skb
, int cpu
,
3892 unsigned int *qtail
)
3894 struct softnet_data
*sd
;
3895 unsigned long flags
;
3898 sd
= &per_cpu(softnet_data
, cpu
);
3900 local_irq_save(flags
);
3903 if (!netif_running(skb
->dev
))
3905 qlen
= skb_queue_len(&sd
->input_pkt_queue
);
3906 if (qlen
<= netdev_max_backlog
&& !skb_flow_limit(skb
, qlen
)) {
3909 __skb_queue_tail(&sd
->input_pkt_queue
, skb
);
3910 input_queue_tail_incr_save(sd
, qtail
);
3912 local_irq_restore(flags
);
3913 return NET_RX_SUCCESS
;
3916 /* Schedule NAPI for backlog device
3917 * We can use non atomic operation since we own the queue lock
3919 if (!__test_and_set_bit(NAPI_STATE_SCHED
, &sd
->backlog
.state
)) {
3920 if (!rps_ipi_queued(sd
))
3921 ____napi_schedule(sd
, &sd
->backlog
);
3930 local_irq_restore(flags
);
3932 atomic_long_inc(&skb
->dev
->rx_dropped
);
3937 static u32
netif_receive_generic_xdp(struct sk_buff
*skb
,
3938 struct bpf_prog
*xdp_prog
)
3940 u32 metalen
, act
= XDP_DROP
;
3941 struct xdp_buff xdp
;
3946 /* Reinjected packets coming from act_mirred or similar should
3947 * not get XDP generic processing.
3949 if (skb_cloned(skb
))
3952 /* XDP packets must be linear and must have sufficient headroom
3953 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
3954 * native XDP provides, thus we need to do it here as well.
3956 if (skb_is_nonlinear(skb
) ||
3957 skb_headroom(skb
) < XDP_PACKET_HEADROOM
) {
3958 int hroom
= XDP_PACKET_HEADROOM
- skb_headroom(skb
);
3959 int troom
= skb
->tail
+ skb
->data_len
- skb
->end
;
3961 /* In case we have to go down the path and also linearize,
3962 * then lets do the pskb_expand_head() work just once here.
3964 if (pskb_expand_head(skb
,
3965 hroom
> 0 ? ALIGN(hroom
, NET_SKB_PAD
) : 0,
3966 troom
> 0 ? troom
+ 128 : 0, GFP_ATOMIC
))
3968 if (skb_linearize(skb
))
3972 /* The XDP program wants to see the packet starting at the MAC
3975 mac_len
= skb
->data
- skb_mac_header(skb
);
3976 hlen
= skb_headlen(skb
) + mac_len
;
3977 xdp
.data
= skb
->data
- mac_len
;
3978 xdp
.data_meta
= xdp
.data
;
3979 xdp
.data_end
= xdp
.data
+ hlen
;
3980 xdp
.data_hard_start
= skb
->data
- skb_headroom(skb
);
3981 orig_data
= xdp
.data
;
3983 act
= bpf_prog_run_xdp(xdp_prog
, &xdp
);
3985 off
= xdp
.data
- orig_data
;
3987 __skb_pull(skb
, off
);
3989 __skb_push(skb
, -off
);
3990 skb
->mac_header
+= off
;
3995 __skb_push(skb
, mac_len
);
3998 metalen
= xdp
.data
- xdp
.data_meta
;
4000 skb_metadata_set(skb
, metalen
);
4003 bpf_warn_invalid_xdp_action(act
);
4006 trace_xdp_exception(skb
->dev
, xdp_prog
, act
);
4017 /* When doing generic XDP we have to bypass the qdisc layer and the
4018 * network taps in order to match in-driver-XDP behavior.
4020 void generic_xdp_tx(struct sk_buff
*skb
, struct bpf_prog
*xdp_prog
)
4022 struct net_device
*dev
= skb
->dev
;
4023 struct netdev_queue
*txq
;
4024 bool free_skb
= true;
4027 txq
= netdev_pick_tx(dev
, skb
, NULL
);
4028 cpu
= smp_processor_id();
4029 HARD_TX_LOCK(dev
, txq
, cpu
);
4030 if (!netif_xmit_stopped(txq
)) {
4031 rc
= netdev_start_xmit(skb
, dev
, txq
, 0);
4032 if (dev_xmit_complete(rc
))
4035 HARD_TX_UNLOCK(dev
, txq
);
4037 trace_xdp_exception(dev
, xdp_prog
, XDP_TX
);
4041 EXPORT_SYMBOL_GPL(generic_xdp_tx
);
4043 static struct static_key generic_xdp_needed __read_mostly
;
4045 int do_xdp_generic(struct bpf_prog
*xdp_prog
, struct sk_buff
*skb
)
4048 u32 act
= netif_receive_generic_xdp(skb
, xdp_prog
);
4051 if (act
!= XDP_PASS
) {
4054 err
= xdp_do_generic_redirect(skb
->dev
, skb
,
4058 /* fallthru to submit skb */
4060 generic_xdp_tx(skb
, xdp_prog
);
4071 EXPORT_SYMBOL_GPL(do_xdp_generic
);
4073 static int netif_rx_internal(struct sk_buff
*skb
)
4077 net_timestamp_check(netdev_tstamp_prequeue
, skb
);
4079 trace_netif_rx(skb
);
4081 if (static_key_false(&generic_xdp_needed
)) {
4086 ret
= do_xdp_generic(rcu_dereference(skb
->dev
->xdp_prog
), skb
);
4090 /* Consider XDP consuming the packet a success from
4091 * the netdev point of view we do not want to count
4094 if (ret
!= XDP_PASS
)
4095 return NET_RX_SUCCESS
;
4099 if (static_key_false(&rps_needed
)) {
4100 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
4106 cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
4108 cpu
= smp_processor_id();
4110 ret
= enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
4119 ret
= enqueue_to_backlog(skb
, get_cpu(), &qtail
);
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
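
/* Illustrative sketch (not part of dev.c): the minimal receive path of a
 * driver or virtual device feeding one frame to the stack with netif_rx().
 * The frame buffer and length are placeholders; from process context
 * netif_rx_ni() would be used instead.
 *
 *	skb = netdev_alloc_skb(dev, len);
 *	if (!skb)
 *		return;
 *	skb_put_data(skb, frame, len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	if (netif_rx(skb) == NET_RX_DROP)
 *		dev->stats.rx_dropped++;
 */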
4164 static __latent_entropy
void net_tx_action(struct softirq_action
*h
)
4166 struct softnet_data
*sd
= this_cpu_ptr(&softnet_data
);
4168 if (sd
->completion_queue
) {
4169 struct sk_buff
*clist
;
4171 local_irq_disable();
4172 clist
= sd
->completion_queue
;
4173 sd
->completion_queue
= NULL
;
4177 struct sk_buff
*skb
= clist
;
4179 clist
= clist
->next
;
4181 WARN_ON(refcount_read(&skb
->users
));
4182 if (likely(get_kfree_skb_cb(skb
)->reason
== SKB_REASON_CONSUMED
))
4183 trace_consume_skb(skb
);
4185 trace_kfree_skb(skb
, net_tx_action
);
4187 if (skb
->fclone
!= SKB_FCLONE_UNAVAILABLE
)
4190 __kfree_skb_defer(skb
);
4193 __kfree_skb_flush();
4196 if (sd
->output_queue
) {
4199 local_irq_disable();
4200 head
= sd
->output_queue
;
4201 sd
->output_queue
= NULL
;
4202 sd
->output_queue_tailp
= &sd
->output_queue
;
4206 struct Qdisc
*q
= head
;
4207 spinlock_t
*root_lock
;
4209 head
= head
->next_sched
;
4211 root_lock
= qdisc_lock(q
);
4212 spin_lock(root_lock
);
4213 /* We need to make sure head->next_sched is read
4214 * before clearing __QDISC_STATE_SCHED
4216 smp_mb__before_atomic();
4217 clear_bit(__QDISC_STATE_SCHED
, &q
->state
);
4219 spin_unlock(root_lock
);
4224 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4225 /* This hook is defined here for ATM LANE */
4226 int (*br_fdb_test_addr_hook
)(struct net_device
*dev
,
4227 unsigned char *addr
) __read_mostly
;
4228 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook
);
4231 static inline struct sk_buff
*
4232 sch_handle_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
, int *ret
,
4233 struct net_device
*orig_dev
)
4235 #ifdef CONFIG_NET_CLS_ACT
4236 struct mini_Qdisc
*miniq
= rcu_dereference_bh(skb
->dev
->miniq_ingress
);
4237 struct tcf_result cl_res
;
4239 /* If there's at least one ingress present somewhere (so
4240 * we get here via enabled static key), remaining devices
4241 * that are not configured with an ingress qdisc will bail
4248 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
4252 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
4253 skb
->tc_at_ingress
= 1;
4254 mini_qdisc_bstats_cpu_update(miniq
, skb
);
4256 switch (tcf_classify(skb
, miniq
->filter_list
, &cl_res
, false)) {
4258 case TC_ACT_RECLASSIFY
:
4259 skb
->tc_index
= TC_H_MIN(cl_res
.classid
);
4262 mini_qdisc_qstats_cpu_drop(miniq
);
4270 case TC_ACT_REDIRECT
:
4271 /* skb_mac_header check was done by cls/act_bpf, so
4272 * we can safely push the L2 header back before
4273 * redirecting to another netdev
4275 __skb_push(skb
, skb
->mac_len
);
4276 skb_do_redirect(skb
);
4281 #endif /* CONFIG_NET_CLS_ACT */
/**
 * netdev_is_rx_handler_busy - check if receive handler is registered
 * @dev: device to check
 *
 * Check if a receive handler is already registered for a given device.
 * Return true if there is one.
 *
 * The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
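
/* Illustrative sketch (not part of dev.c): a minimal rx_handler of the kind
 * registered with netdev_rx_handler_register(); bridge, bonding, macvlan and
 * openvswitch hook the receive path this way.  The handler, port structure
 * and its fields are hypothetical.
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct example_port *port =
 *			rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (!example_wants_frame(port, skb))
 *			return RX_HANDLER_PASS;
 *
 *		skb->dev = port->upper_dev;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 *	err = netdev_rx_handler_register(dev, example_handle_frame, port);
 */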
/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4353 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4354 * the special handling of PFMEMALLOC skbs.
4356 static bool skb_pfmemalloc_protocol(struct sk_buff
*skb
)
4358 switch (skb
->protocol
) {
4359 case htons(ETH_P_ARP
):
4360 case htons(ETH_P_IP
):
4361 case htons(ETH_P_IPV6
):
4362 case htons(ETH_P_8021Q
):
4363 case htons(ETH_P_8021AD
):
4370 static inline int nf_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
,
4371 int *ret
, struct net_device
*orig_dev
)
4373 #ifdef CONFIG_NETFILTER_INGRESS
4374 if (nf_hook_ingress_active(skb
)) {
4378 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
4383 ingress_retval
= nf_hook_ingress(skb
);
4385 return ingress_retval
;
4387 #endif /* CONFIG_NETFILTER_INGRESS */
4391 static int __netif_receive_skb_core(struct sk_buff
*skb
, bool pfmemalloc
)
4393 struct packet_type
*ptype
, *pt_prev
;
4394 rx_handler_func_t
*rx_handler
;
4395 struct net_device
*orig_dev
;
4396 bool deliver_exact
= false;
4397 int ret
= NET_RX_DROP
;
4400 net_timestamp_check(!netdev_tstamp_prequeue
, skb
);
4402 trace_netif_receive_skb(skb
);
4404 orig_dev
= skb
->dev
;
4406 skb_reset_network_header(skb
);
4407 if (!skb_transport_header_was_set(skb
))
4408 skb_reset_transport_header(skb
);
4409 skb_reset_mac_len(skb
);
4414 skb
->skb_iif
= skb
->dev
->ifindex
;
4416 __this_cpu_inc(softnet_data
.processed
);
4418 if (skb
->protocol
== cpu_to_be16(ETH_P_8021Q
) ||
4419 skb
->protocol
== cpu_to_be16(ETH_P_8021AD
)) {
4420 skb
= skb_vlan_untag(skb
);
4425 if (skb_skip_tc_classify(skb
))
4431 list_for_each_entry_rcu(ptype
, &ptype_all
, list
) {
4433 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
4437 list_for_each_entry_rcu(ptype
, &skb
->dev
->ptype_all
, list
) {
4439 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
4444 #ifdef CONFIG_NET_INGRESS
4445 if (static_key_false(&ingress_needed
)) {
4446 skb
= sch_handle_ingress(skb
, &pt_prev
, &ret
, orig_dev
);
4450 if (nf_ingress(skb
, &pt_prev
, &ret
, orig_dev
) < 0)
4456 if (pfmemalloc
&& !skb_pfmemalloc_protocol(skb
))
4459 if (skb_vlan_tag_present(skb
)) {
4461 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
4464 if (vlan_do_receive(&skb
))
4466 else if (unlikely(!skb
))
4470 rx_handler
= rcu_dereference(skb
->dev
->rx_handler
);
4473 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
4476 switch (rx_handler(&skb
)) {
4477 case RX_HANDLER_CONSUMED
:
4478 ret
= NET_RX_SUCCESS
;
4480 case RX_HANDLER_ANOTHER
:
4482 case RX_HANDLER_EXACT
:
4483 deliver_exact
= true;
4484 case RX_HANDLER_PASS
:
4491 if (unlikely(skb_vlan_tag_present(skb
))) {
4492 if (skb_vlan_tag_get_id(skb
))
4493 skb
->pkt_type
= PACKET_OTHERHOST
;
4494 /* Note: we might in the future use prio bits
4495 * and set skb->priority like in vlan_do_receive()
4496 * For the time being, just ignore Priority Code Point
4501 type
= skb
->protocol
;
4503 /* deliver only exact match when indicated */
4504 if (likely(!deliver_exact
)) {
4505 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
4506 &ptype_base
[ntohs(type
) &
4510 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
4511 &orig_dev
->ptype_specific
);
4513 if (unlikely(skb
->dev
!= orig_dev
)) {
4514 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
4515 &skb
->dev
->ptype_specific
);
4519 if (unlikely(skb_orphan_frags_rx(skb
, GFP_ATOMIC
)))
4522 ret
= pt_prev
->func(skb
, skb
->dev
, pt_prev
, orig_dev
);
4526 atomic_long_inc(&skb
->dev
->rx_dropped
);
4528 atomic_long_inc(&skb
->dev
->rx_nohandler
);
4530 /* Jamal, now you will not able to escape explaining
4531 * me how you were going to use this. :-)
4541 * netif_receive_skb_core - special purpose version of netif_receive_skb
4542 * @skb: buffer to process
4544 * More direct receive version of netif_receive_skb(). It should
4545 * only be used by callers that have a need to skip RPS and Generic XDP.
4546 * Caller must also take care of handling if (page_is_)pfmemalloc.
4548 * This function may only be called from softirq context and interrupts
4549 * should be enabled.
4551 * Return values (usually ignored):
4552 * NET_RX_SUCCESS: no congestion
4553 * NET_RX_DROP: packet was dropped
4555 int netif_receive_skb_core(struct sk_buff
*skb
)
4560 ret
= __netif_receive_skb_core(skb
, false);
4565 EXPORT_SYMBOL(netif_receive_skb_core
);
4567 static int __netif_receive_skb(struct sk_buff
*skb
)
4571 if (sk_memalloc_socks() && skb_pfmemalloc(skb
)) {
4572 unsigned int noreclaim_flag
;
4575 * PFMEMALLOC skbs are special, they should
4576 * - be delivered to SOCK_MEMALLOC sockets only
4577 * - stay away from userspace
4578 * - have bounded memory usage
4580 * Use PF_MEMALLOC as this saves us from propagating the allocation
4581 * context down to all allocation sites.
4583 noreclaim_flag
= memalloc_noreclaim_save();
4584 ret
= __netif_receive_skb_core(skb
, true);
4585 memalloc_noreclaim_restore(noreclaim_flag
);
4587 ret
= __netif_receive_skb_core(skb
, false);
4592 static int generic_xdp_install(struct net_device
*dev
, struct netdev_bpf
*xdp
)
4594 struct bpf_prog
*old
= rtnl_dereference(dev
->xdp_prog
);
4595 struct bpf_prog
*new = xdp
->prog
;
4598 switch (xdp
->command
) {
4599 case XDP_SETUP_PROG
:
4600 rcu_assign_pointer(dev
->xdp_prog
, new);
4605 static_key_slow_dec(&generic_xdp_needed
);
4606 } else if (new && !old
) {
4607 static_key_slow_inc(&generic_xdp_needed
);
4608 dev_disable_lro(dev
);
4612 case XDP_QUERY_PROG
:
4613 xdp
->prog_attached
= !!old
;
4614 xdp
->prog_id
= old
? old
->aux
->id
: 0;
4625 static int netif_receive_skb_internal(struct sk_buff
*skb
)
4629 net_timestamp_check(netdev_tstamp_prequeue
, skb
);
4631 if (skb_defer_rx_timestamp(skb
))
4632 return NET_RX_SUCCESS
;
4634 if (static_key_false(&generic_xdp_needed
)) {
4639 ret
= do_xdp_generic(rcu_dereference(skb
->dev
->xdp_prog
), skb
);
4643 if (ret
!= XDP_PASS
)
4649 if (static_key_false(&rps_needed
)) {
4650 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
4651 int cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
4654 ret
= enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
4660 ret
= __netif_receive_skb(skb
);
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			input_queue_head_incr(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			input_queue_head_incr(sd);

static void flush_all_backlogs(void)
	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

static int napi_gro_complete(struct sk_buff *skb)
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;

	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)

		err = ptype->callbacks.gro_complete(skb, 0);

	WARN_ON(&ptype->list == head);
		return NET_RX_SUCCESS;

	return netif_receive_skb_internal(skb);

/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {

	for (skb = prev; skb; skb = prev) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)

		napi_gro_complete(skb);

	napi->gro_list = NULL;
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;

static void skb_gro_reset_offset(struct sk_buff *skb)
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	enum gro_result ret;

	if (netif_elide_gro(skb->dev))

	gro_list_prepare(napi, skb);

	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);

	if (&ptype->list == head)

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

		struct sk_buff *nskb = *pp;

		napi_gro_complete(nskb);

	if (NAPI_GRO_CB(skb)->flush)

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {

		napi_gro_complete(nskb);

	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;

	grow = skb_gro_offset(skb) - skb_headlen(skb);
		gro_pull_from_frag0(skb, grow);

struct packet_offload *gro_find_receive_by_type(__be16 type)
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
EXPORT_SYMBOL(gro_find_complete_by_type);

static void napi_skb_free_stolen_head(struct sk_buff *skb)
	kmem_cache_free(skbuff_head_cache, skb);

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
		if (netif_receive_skb_internal(skb))

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
EXPORT_SYMBOL(napi_gro_receive);
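
/* Illustrative sketch (hypothetical driver, not part of this file): a NAPI
 * poll routine that builds skbs from its RX ring and feeds them through
 * GRO with napi_gro_receive() instead of netif_receive_skb(). The
 * struct mydrv_priv layout and mydrv_rx_pop() helper are assumptions;
 * napi_gro_receive(), eth_type_trans() and napi_complete_done() are the
 * real APIs being exercised.
 */
#if 0
static int mydrv_gro_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = mydrv_rx_pop(priv);	/* assumed helper */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->netdev);
		napi_gro_receive(napi, skb);	/* may merge into napi->gro_list */
		work++;
	}
	if (work < budget)
		napi_complete_done(napi, work);
	return work;
}
#endif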
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
	if (unlikely(skb->pfmemalloc)) {

	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));

	skb->dev = napi->dev;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

struct sk_buff *napi_get_frags(struct napi_struct *napi)
	struct sk_buff *skb = napi->skb;

		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
			skb_mark_napi_id(skb, napi);
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))

		napi_reuse_skb(napi, skb);

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
			napi_reuse_skb(napi, skb);

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);

		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;

		__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

gro_result_t napi_gro_frags(struct napi_struct *napi)
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
EXPORT_SYMBOL(napi_gro_frags);
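
/* Illustrative sketch (hypothetical driver): the frag-based GRO entry
 * point. The driver asks the stack for a shell skb with napi_get_frags(),
 * attaches its page fragment, then hands it back through napi_gro_frags(),
 * which pulls the Ethernet header out of frag0 as implemented above. The
 * mydrv_rx_frag() name, its parameters and the truesize choice are
 * assumptions for the example.
 */
#if 0
static void mydrv_rx_frag(struct napi_struct *napi, struct page *page,
			  unsigned int off, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);
		return;
	}
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len, len);
	napi_gro_frags(napi);	/* consumes or recycles napi->skb */
}
#endif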
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));

	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev);

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;
EXPORT_SYMBOL(__skb_gro_checksum_complete);

static void net_rps_send_ipi(struct softnet_data *remsd)
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
	struct softnet_data *remsd = sd->rps_ipi_list;

		sd->rps_ipi_list = NULL;

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
	return sd->rps_ipi_list != NULL;

static int process_backlog(struct napi_struct *napi, int quota)
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

	/* Check if we have pending ipi, its better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);

	napi->weight = dev_rx_weight;
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			__netif_receive_skb(skb);
			input_queue_head_incr(sd);
			if (++work >= quota)

		local_irq_disable();
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
EXPORT_SYMBOL(__napi_schedule);

/**
 * napi_schedule_prep - check if napi can be scheduled
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable
 * insure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
	unsigned long val, new;

		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))

		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
EXPORT_SYMBOL(napi_schedule_prep);
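
/* Illustrative sketch (hypothetical driver): the usual interrupt-handler
 * pairing for the two helpers above. napi_schedule_prep() wins the right
 * to schedule exactly once; __napi_schedule() then queues the instance on
 * this CPU's softnet poll list. struct mydrv_priv and mydrv_mask_irqs()
 * are assumptions for the example.
 */
#if 0
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_mask_irqs(priv);		/* stop further RX interrupts */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif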
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
EXPORT_SYMBOL(__napi_schedule_irqoff);

bool napi_complete_done(struct napi_struct *n, int work_done)
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))

		unsigned long timeout = 0;

			timeout = n->dev->gro_flush_timeout;

		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to bound somehow the time packets are kept in
		 * the GRO layer
		 */
		napi_gro_flush(n, !!timeout);
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);

	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);

		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
EXPORT_SYMBOL(napi_complete_done);
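
/* Illustrative sketch (hypothetical driver): completing a poll cycle.
 * Device interrupts should only be re-enabled when napi_complete_done()
 * returns true; when busy polling still owns the instance it returns
 * false and the driver must leave its interrupts masked. struct
 * mydrv_priv, mydrv_clean_rx_ring() and mydrv_unmask_irqs() are
 * assumptions for the example.
 */
#if 0
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work = mydrv_clean_rx_ring(priv, budget);	/* assumed helper */

	if (work < budget && napi_complete_done(napi, work))
		mydrv_unmask_irqs(priv);	/* hand control back to the hw irq */
	return work;
}
#endif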
/* must be called under rcu_read_lock(), as we dont take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)

#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET)
		__napi_schedule(napi);

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

	napi = napi_by_id(napi_id);

		unsigned long val = READ_ONCE(napi->state);

		/* If multiple threads are competing for this napi,
		 * we avoid dirtying napi->state as much as we can.
		 */
		if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
			   NAPIF_STATE_IN_BUSY_POLL))
		if (cmpxchg(&napi->state, val,
			    val | NAPIF_STATE_IN_BUSY_POLL |
				  NAPIF_STATE_SCHED) != val)
		have_poll_lock = netpoll_poll_lock(napi);
		napi_poll = napi->poll;

		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);

		__NET_ADD_STATS(dev_net(napi->dev),
				LINUX_MIB_BUSYPOLLRXPACKETS, work);

		if (!loop_end || loop_end(loop_end_arg, start_time))

		if (unlikely(need_resched())) {
				busy_poll_stop(napi, have_poll_lock);

			if (loop_end(loop_end_arg, start_time))

		busy_poll_stop(napi, have_poll_lock);
EXPORT_SYMBOL(napi_busy_loop);
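
/* Illustrative sketch: how a caller of napi_busy_loop() above might look.
 * The loop_end callback tells the core when to give up; here it is a
 * simple elapsed-time check against busy_loop_current_time(), the same
 * clock used for start_time above. The my_* names, the 50 usec budget and
 * the deadline policy are assumptions, not part of this file.
 */
#if 0
static bool my_busy_loop_end(void *arg, unsigned long start_time)
{
	unsigned long budget_us = (unsigned long)arg;

	/* Give up once the chosen busy-poll budget has elapsed. */
	return busy_loop_current_time() - start_time > budget_us;
}

static void my_poll_for_napi(unsigned int napi_id)
{
	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, my_busy_loop_end, (void *)50UL);
}
#endif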
#endif /* CONFIG_NET_RX_BUSY_POLL */

static void napi_hash_add(struct napi_struct *napi)
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
bool napi_hash_del(struct napi_struct *napi)
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);
	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_list && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
EXPORT_SYMBOL(netif_napi_add);
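
/* Illustrative sketch (hypothetical driver setup): registering a NAPI
 * instance with the weight convention enforced above, then enabling it in
 * ndo_open once the device is ready. struct mydrv_priv and the function
 * names are assumptions; netif_napi_add(), napi_enable() and
 * NAPI_POLL_WEIGHT are the real interfaces being shown.
 */
#if 0
static int mydrv_setup_napi(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	/* NAPI_POLL_WEIGHT (64) is the recommended weight for new drivers */
	netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
	return 0;
}

static int mydrv_open(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);	/* clears the SCHED bit set at add time */
	return 0;
}
#endif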
void napi_disable(struct napi_struct *n)
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
	if (napi_hash_del(napi))
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
EXPORT_SYMBOL(netif_napi_del);

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {

	/* flush too old packets
	 * If HZ < 1000, flush all packets.
	 */
	napi_gro_flush(n, HZ >= 1000);

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");

	list_add_tail(&n->poll_list, repoll);

	netpoll_poll_unlock(have);

static __latent_entropy void net_rx_action(struct softirq_action *h)
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);

		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);

	__kfree_skb_flush();

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	/* counter for the number of times this device was added to us */
	/* private field for the users */

	struct list_head list;
	struct rcu_head rcu;

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
	struct net_device *dev = data;

	return upper_dev == dev;

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all - Check if device is linked to an upper device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
	return !list_empty(&dev->adj_list.upper);
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
	struct netdev_adjacent *upper;

	if (list_empty(&dev->adj_list.upper))

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
	return !list_empty(&dev->adj_list.lower);

void *netdev_adjacent_get_private(struct list_head *adj_list)
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)

	*iter = &upper->list;
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *netdev_next_upper_dev(struct net_device *dev,
						struct list_head **iter)
	struct netdev_adjacent *upper;

	upper = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)

	*iter = &upper->list;

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)

	*iter = &upper->list;

static int netdev_walk_all_upper_dev(struct net_device *dev,
				     int (*fn)(struct net_device *dev,
					       void *data),
				     void *data)
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];

	iter = &dev->adj_list.upper;

		ret = fn(now, data);

			udev = netdev_next_upper_dev(now, &iter);

			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;

		next = dev_stack[--cur];
		niter = iter_stack[cur];

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];

	iter = &dev->adj_list.upper;

		ret = fn(now, data);

			udev = netdev_next_upper_dev_rcu(now, &iter);

			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;

		next = dev_stack[--cur];
		niter = iter_stack[cur];
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)

	*iter = lower->list.next;

	return lower->private;
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)

	*iter = &lower->list;

	return lower->private;
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)

	*iter = lower->list.next;
EXPORT_SYMBOL(netdev_lower_get_next);

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)

	*iter = &lower->list;

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];

	iter = &dev->adj_list.lower;

		ret = fn(now, data);

			ldev = netdev_next_lower_dev(now, &iter);

			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;

		next = dev_stack[--cur];
		niter = iter_stack[cur];
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)

	*iter = &lower->list;

static u8 __netdev_upper_depth(struct net_device *dev)
	struct net_device *udev;
	struct list_head *iter;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev(dev, &iter)) {
		if (max_depth < udev->upper_level)
			max_depth = udev->upper_level;

static u8 __netdev_lower_depth(struct net_device *dev)
	struct net_device *ldev;
	struct list_head *iter;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		if (max_depth < ldev->lower_level)
			max_depth = ldev->lower_level;

static int __netdev_update_upper_level(struct net_device *dev, void *data)
	dev->upper_level = __netdev_upper_depth(dev) + 1;

static int __netdev_update_lower_level(struct net_device *dev, void *data)
	dev->lower_level = __netdev_lower_depth(dev) + 1;

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];

	iter = &dev->adj_list.lower;

		ret = fn(now, data);

			ldev = netdev_next_lower_dev_rcu(now, &iter);

			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;

		next = dev_stack[--cur];
		niter = iter_stack[cur];
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);

	return lower->private;
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(adj_dev, dev_list);

		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);

	adj->master = master;
	adj->private = private;

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);

	/* Ensure that master link is always the first item in list. */
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
		list_add_tail_rcu(&adj->list, dev_list);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;

		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	kfree_rcu(adj, rcu);

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						    struct net_device *upper_dev)
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netlink_ext_ack *extack)
	struct netdev_notifier_changeupper_info changeupper_info = {
		.upper_dev = upper_dev,
		.upper_info = upper_info,

	if (dev == upper_dev)

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (netdev_has_upper_dev(upper_dev, dev))

	if (netdev_has_upper_dev(dev, upper_dev))

	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)

	if (master && netdev_master_upper_dev_get(dev))

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);

	__netdev_update_upper_level(dev, NULL);
	netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, NULL);
	netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, NULL);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, extack);
EXPORT_SYMBOL(netdev_upper_dev_link);
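
/* Illustrative sketch: how a stacked driver (a tunnel- or team-like upper
 * device) might record its relationship to a lower device with the API
 * above. The my_stack_on() name is an assumption; the RTNL requirement and
 * the netdev_upper_dev_unlink() pairing on teardown are the points shown.
 */
#if 0
static int my_stack_on(struct net_device *upper, struct net_device *lower,
		       struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();
	err = netdev_upper_dev_link(lower, upper, extack);
	if (err)
		return err;
	/* ... later, on teardown, also under RTNL: */
	/* netdev_upper_dev_unlink(lower, upper); */
	return 0;
}
#endif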
/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, extack);
EXPORT_SYMBOL(netdev_master_upper_dev_link);

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @upper_dev: new upper device
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
	struct netdev_notifier_changeupper_info changeupper_info = {
		.upper_dev = upper_dev,

	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
				      &changeupper_info.info);

	__netdev_update_upper_level(dev, NULL);
	netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, NULL);
	netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, NULL);
EXPORT_SYMBOL(netdev_upper_dev_unlink);

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
	struct netdev_notifier_bonding_info info = {

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
				      &info.info);
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);

static void netdev_adjacent_del_links(struct net_device *dev)
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
	struct netdev_adjacent *lower;

	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);

	return lower->private;
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev)
	struct net_device *lower = NULL;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower);
		if (max_nest < nest)

	return max_nest + 1;
EXPORT_SYMBOL(dev_get_nest_level);

/**
 * netdev_lower_change - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
		.info.dev = lower_dev,

	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
				      &changelowerstate_info.info);
EXPORT_SYMBOL(netdev_lower_state_changed);

static void dev_change_rx_flags(struct net_device *dev, int flags)
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
	unsigned int old_flags = dev->flags;

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * If inc causes overflow, untouch promisc and return error.
		 */
			dev->flags &= ~IFF_PROMISC;
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);

	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));

		dev_change_rx_flags(dev, IFF_PROMISC);

		__dev_notify_flags(dev, old_flags, IFF_PROMISC);

/**
 * dev_set_promiscuity - update promiscuity count on a device
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
	unsigned int old_flags = dev->flags;

	err = __dev_set_promiscuity(dev, inc, true);
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
EXPORT_SYMBOL(dev_set_promiscuity);
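
/* Illustrative sketch: a hypothetical capture-style user of the counter
 * maintained above. Promiscuity is reference counted, so the matching
 * dev_set_promiscuity(dev, -1) on detach is what actually restores normal
 * filtering. RTNL must be held around both calls; my_capture_attach() is
 * an assumed name.
 */
#if 0
static int my_capture_attach(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();
	err = dev_set_promiscuity(dev, 1);
	if (err)
		return err;
	/* ... on detach: dev_set_promiscuity(dev, -1); */
	return 0;
}
#endif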
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * If inc causes overflow, untouch allmulti and return error.
		 */
			dev->flags &= ~IFF_ALLMULTI;
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);

	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);

/**
 * dev_set_allmulti - update allmulti count on a device
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all interfaces. Once it hits zero the device reverts back to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
	return __dev_set_allmulti(dev, inc, true);
EXPORT_SYMBOL(dev_set_allmulti);

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 */
void __dev_set_rx_mode(struct net_device *dev)
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))

	if (!netif_device_present(dev))

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);

void dev_set_rx_mode(struct net_device *dev)
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);

/**
 * dev_get_flags - get flags reported to userspace
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
	flags = (dev->flags & ~(IFF_PROMISC |
		(dev->gflags & (IFF_PROMISC |

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
	unsigned int old_flags = dev->flags;

	/*
	 * Set the flags on our device.
	 */
	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */
	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface. We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 */
	if ((old_flags ^ flags) & IFF_UP) {
		if (old_flags & IFF_UP)
			ret = __dev_open(dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC, when
	 * IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
	unsigned int changes = dev->flags ^ old_flags;

		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
			call_netdevice_notifiers(NETDEV_DOWN, dev);

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info = {
			.flags_changed = changes,

		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);

/**
 * dev_change_flags - change device settings
 * @flags: device state flags
 *
 * Change settings on device based state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
EXPORT_SYMBOL(dev_change_flags);
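
/* Illustrative sketch: toggling IFF_UP through dev_change_flags() above,
 * the same path the SIOCSIFFLAGS ioctl takes. The my_set_iface_up() name
 * is an assumption; the rtnl_lock()/rtnl_unlock() bracketing is the
 * requirement being illustrated.
 */
#if 0
static int my_set_iface_up(struct net_device *dev, bool up)
{
	unsigned int flags;
	int err;

	rtnl_lock();
	flags = dev_get_flags(dev);
	if (up)
		flags |= IFF_UP;
	else
		flags &= ~IFF_UP;
	err = dev_change_flags(dev, flags);
	rtnl_unlock();
	return err;
}
#endif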
int __dev_set_mtu(struct net_device *dev, int new_mtu)
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	/* Pairs with all the lockless reads of dev->mtu in the stack */
	WRITE_ONCE(dev->mtu, new_mtu);
EXPORT_SYMBOL(__dev_set_mtu);

/**
 * dev_set_mtu - Change maximum transfer unit
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
	if (new_mtu == dev->mtu)

	err = dev_validate_mtu(dev, new_mtu);

	if (!netif_device_present(dev))

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						   orig_mtu);
		err = notifier_to_errno(err);
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						     new_mtu);
EXPORT_SYMBOL(dev_set_mtu);
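
/* Illustrative sketch: changing the MTU from kernel code through the
 * helper above. dev_set_mtu() must run under RTNL; on error the original
 * MTU is restored and re-notified as implemented above. The function name
 * and the 9000-byte value are assumptions for the example.
 */
#if 0
static int my_set_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* arbitrary jumbo value for the sketch */
	rtnl_unlock();
	return err;
}
#endif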
7203 * dev_set_group - Change group this device belongs to
7205 * @new_group: group this device should belong to
7207 void dev_set_group(struct net_device
*dev
, int new_group
)
7209 dev
->group
= new_group
;
7211 EXPORT_SYMBOL(dev_set_group
);
7214 * dev_set_mac_address - Change Media Access Control Address
7218 * Change the hardware (MAC) address of the device
7220 int dev_set_mac_address(struct net_device
*dev
, struct sockaddr
*sa
)
7222 const struct net_device_ops
*ops
= dev
->netdev_ops
;
7225 if (!ops
->ndo_set_mac_address
)
7227 if (sa
->sa_family
!= dev
->type
)
7229 if (!netif_device_present(dev
))
7231 err
= ops
->ndo_set_mac_address(dev
, sa
);
7234 dev
->addr_assign_type
= NET_ADDR_SET
;
7235 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
7236 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
7239 EXPORT_SYMBOL(dev_set_mac_address
);
/**
 *	dev_change_carrier - Change device carrier
 *	@dev: device
 *	@new_carrier: new value
 *
 *	Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 *	dev_get_phys_port_id - Get device physical port ID
 *	@dev: device
 *	@ppid: port ID
 *
 *	Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 *	dev_get_phys_port_name - Get device physical port name
 *	@dev: device
 *	@name: port name
 *	@len: limit of bytes to copy to name
 *
 *	Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_name)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_name(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 *	dev_change_proto_down - update protocol port state information
 *	@dev: device
 *	@proto_down: new value
 *
 *	This info can be used by switch drivers to set the phys state of the
 *	port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);
u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
{
	struct netdev_bpf xdp;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG;

	/* Query must always succeed. */
	WARN_ON(bpf_op(dev, &xdp) < 0);
	if (prog_id)
		*prog_id = xdp.prog_id;

	return xdp.prog_attached;
}

static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
			   struct netlink_ext_ack *extack, u32 flags,
			   struct bpf_prog *prog)
{
	struct netdev_bpf xdp;

	memset(&xdp, 0, sizeof(xdp));
	if (flags & XDP_FLAGS_HW_MODE)
		xdp.command = XDP_SETUP_PROG_HW;
	else
		xdp.command = XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	return bpf_op(dev, &xdp);
}
/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct bpf_prog *prog = NULL;
	bpf_op_t bpf_op, bpf_chk;
	int err;

	ASSERT_RTNL();

	bpf_op = bpf_chk = ops->ndo_bpf;
	if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
		return -EOPNOTSUPP;
	if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
		bpf_op = generic_xdp_install;
	if (bpf_op == bpf_chk)
		bpf_chk = generic_xdp_install;

	if (fd >= 0) {
		if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
			return -EEXIST;
		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
		    __dev_xdp_attached(dev, bpf_op, NULL))
			return -EBUSY;

		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
					     bpf_op == ops->ndo_bpf);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (!(flags & XDP_FLAGS_HW_MODE) &&
		    bpf_prog_is_dev_bound(prog->aux)) {
			NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
			bpf_prog_put(prog);
			return -EINVAL;
		}
	}

	err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
	if (err < 0 && prog)
		bpf_prog_put(prog);

	return err;
}
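
/*
 * Example: an illustrative sketch of the driver side that dev_xdp_install()
 * talks to.  A driver's ndo_bpf callback dispatches on the netdev_bpf
 * command; the priv layout and the names example_ndo_bpf/example_setup_prog
 * are hypothetical.
 *
 *	struct example_priv {
 *		struct bpf_prog *xdp_prog;
 *	};
 *
 *	static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *xdp)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *
 *		switch (xdp->command) {
 *		case XDP_SETUP_PROG:
 *			return example_setup_prog(dev, xdp->prog, xdp->extack);
 *		case XDP_QUERY_PROG:
 *			xdp->prog_attached = !!priv->xdp_prog;
 *			xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */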
/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
		}
	}
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	return features;
}
int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
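
/*
 * Example: an illustrative sketch of a driver toggling an offload after a
 * hardware or configuration change and asking the core to recompute
 * dev->features.  Must run under RTNL; the helper name is hypothetical.
 *
 *	static void example_disable_rx_csum(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		dev->hw_features &= ~NETIF_F_RXCSUM;
 *		netdev_update_features(dev);
 *	}
 */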
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->netdev_ops->ndo_udp_tunnel_add) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret) {
		dev->reg_state = NETREG_UNREGISTERED;
		goto err_uninit;
	}
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		rcu_barrier();

		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note : We dont allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' dont need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
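
/*
 * Example: an illustrative sketch of the intended use: a driver backs
 * several hardware interfaces with one NAPI context hung off a dummy
 * netdev that is never registered.  The structure and names are
 * hypothetical; netif_napi_add()/napi_enable() are the usual NAPI
 * entry points.
 *
 *	struct example_hw {
 *		struct net_device napi_dev;	(dummy, never registered)
 *		struct napi_struct napi;
 *	};
 *
 *	static void example_hw_init(struct example_hw *hw,
 *				    int (*poll)(struct napi_struct *, int))
 *	{
 *		init_dummy_netdev(&hw->napi_dev);
 *		netif_napi_add(&hw->napi_dev, &hw->napi, poll,
 *			       NAPI_POLL_WEIGHT);
 *		napi_enable(&hw->napi);
 *	}
 */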
int dev_validate_mtu(struct net_device *dev, int new_mtu)
{
	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
				    dev->name, new_mtu, dev->min_mtu);
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
				    dev->name, new_mtu, dev->max_mtu);
		return -EINVAL;
	}
	return 0;
}
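
/*
 * Example: an illustrative sketch of the driver side of MTU validation.
 * A driver advertises its supported range at probe time; dev_set_mtu()
 * then rejects out-of-range requests via dev_validate_mtu() before
 * ndo_change_mtu is ever called.  The upper bound shown is hypothetical.
 *
 *	dev->min_mtu = ETH_MIN_MTU;
 *	dev->max_mtu = 9000;	(jumbo frames supported by this NIC)
 */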
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
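
/*
 * Example: an illustrative sketch of the usual allocate/register/unregister
 * lifecycle from a driver's probe/remove paths.  All names are hypothetical
 * and error handling is trimmed to the essentials.
 *
 *	static int example_probe(void)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev(sizeof(struct example_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *
 *		dev->netdev_ops = &example_netdev_ops;
 *		eth_hw_addr_random(dev);
 *
 *		err = register_netdev(dev);	(takes RTNL internally)
 *		if (err)
 *			free_netdev(dev);
 *		return err;
 *	}
 *
 *	static void example_remove(struct net_device *dev)
 *	{
 *		unregister_netdev(dev);		(takes RTNL internally)
 *		free_netdev(dev);
 *	}
 */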
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + sizeof(*netdev_stats), 0,
	       sizeof(*stats64) - sizeof(*netdev_stats));
#else
	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
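
/*
 * Example: an illustrative sketch of a driver-side ndo_get_stats64 that
 * dev_get_stats() would call: copy the generic counters, then add
 * driver-private ones.  The priv layout and field names are hypothetical.
 *
 *	static void example_get_stats64(struct net_device *dev,
 *					struct rtnl_link_stats64 *storage)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *
 *		netdev_stats_to_stats64(storage, &dev->stats);
 *		storage->rx_errors += priv->hw_rx_errors;
 *	}
 */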
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	unsigned int alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->upper_level = 1;
	dev->lower_level = 1;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;
	struct bpf_prog *prog;

	might_sleep();
	netif_free_tx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	prog = rcu_dereference_protected(dev->xdp_prog, 1);
	if (prog) {
		bpf_prog_put(prog);
		static_key_slow_dec(&generic_xdp_needed);
	}

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *  Note: As most callers use a stack allocated list_head,
 *  we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
/**
 *	dev_change_net_namespace - move device to different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err, new_nsid, new_ifindex;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_get_valid_name(net, dev, pat);
		if (err < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);

	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		new_ifindex = dev_new_index(net);
	else
		new_ifindex = dev->ifindex;

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
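
/*
 * Example: an illustrative sketch of how a master device (bonding/bridge
 * style) would fold each slave's feature set into its own with this helper
 * and then propagate the result.  The slave list, the seed value and the
 * names are hypothetical; the caller would hold RTNL.
 *
 *	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *	struct example_slave *slave;
 *
 *	list_for_each_entry(slave, &master_priv->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     master->hw_features);
 *	master->features = features;
 *	netdev_change_features(master);
 */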
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
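
/*
 * Example: these wrappers give driver messages a consistent
 * "<driver> <bus-id> <ifname>: ..." prefix.  An illustrative sketch of
 * typical call sites (the event and the variables are hypothetical):
 *
 *	netdev_info(dev, "link up, %u Mbps, %s duplex\n", speed,
 *		    full_duplex ? "full" : "half");
 *	netdev_warn(dev, "TX timeout on queue %d\n", txq_index);
 */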
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (__dev_get_by_name(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special. If any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);