// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>

#include "net-sysfs.h"
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
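
/* Example (illustrative sketch, not part of the original file): a module
 * that registers a tap for every IPv4 frame.  The names my_pkt_rcv and
 * my_pkt_type are hypothetical.
 *
 *	static int my_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		pr_info("IPv4 frame, %u bytes, on %s\n", skb->len, dev->name);
 *		kfree_skb(skb);		// the handler owns this reference
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pkt_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = my_pkt_rcv,
 *	};
 *
 * The module would call dev_add_pack(&my_pkt_type) at init time and
 * dev_remove_pack(&my_pkt_type) (which sleeps, see below) at exit time.
 */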
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 * netdev_boot_setup_check	- check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 * netdev_boot_base	- get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of a interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
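
/* Example (illustrative): a typical lookup from process context.  The
 * reference taken by dev_get_by_name() must be dropped with dev_put():
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */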
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
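
/* Example (illustrative): a driver creating devices named dummy0, dummy1,
 * ... would pass the format string when setting up each one:
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto out_free;	// no free slot, or invalid format string
 *
 * On success err holds the unit number that was assigned and dev->name
 * has been filled in ("dummy0" for the first device).
 */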
int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Some auto-enslaved devices e.g. failover slaves are
	 * special, as userspace might rename the device after
	 * the interface had been brought up and running since
	 * the point kernel initiated auto-enslavement. Allow
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name change, as
	 * they are supposed to operate on master interface
	 * directly.
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
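
/* Example (illustrative): bringing an interface up from kernel code.
 * __dev_open() asserts RTNL, so the caller must take it first; passing a
 * NULL extack simply forgoes extended error reporting:
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);
 *	rtnl_unlock();
 */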
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}
void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
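
/* Example (illustrative sketch): a subsystem watching for devices coming
 * up.  my_netdev_event and my_nb are hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 * register_netdevice_notifier(&my_nb) then replays NETDEV_REGISTER (and
 * NETDEV_UP for running devices) for everything already on the list.
 */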
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_info info = {
		.dev = dev,
		.extack = extack,
	};

	return call_netdevice_notifiers_info(val, &info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
/**
 *	call_netdevice_notifiers_mtu - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@arg: additional u32 argument passed to the notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
{
	struct netdev_notifier_info_ext info = {
		.info.dev = dev,
		.ext.mtu = arg,
	};

	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

	return call_netdevice_notifiers_info(val, &info.info);
}
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_branch_unlikely(&netstamp_needed_key))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
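
/* Example (illustrative): a veth-style virtual driver can implement
 * transmit by injecting the frame straight into its peer's receive path.
 * my_get_peer() is a hypothetical helper:
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;	// skb already freed
 *		return NETDEV_TX_OK;
 *	}
 */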
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{
	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->ignore_outgoing)
			continue;

		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and, if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		/* walk through the TCs and see if it falls into any of them */
		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		/* didn't find it, just return -1 to indicate no match */
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
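
/* Worked example, assuming a hypothetical device with TC0 = {offset 0,
 * count 4} and TC1 = {offset 4, count 4}: for txq 5, (5 - 0) < 4 fails
 * for TC0 but (5 - 4) < 4 holds for TC1, so netdev_txq_to_tc() returns
 * 1. The unsigned subtraction also rejects queues below a TC's offset,
 * because the difference wraps to a huge value.
 */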
#ifdef CONFIG_XPS
struct static_key xps_needed __read_mostly;
EXPORT_SYMBOL(xps_needed);
struct static_key xps_rxqs_needed __read_mostly;
EXPORT_SYMBOL(xps_rxqs_needed);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}
static void reset_xps_maps(struct net_device *dev,
			   struct xps_dev_maps *dev_maps,
			   bool is_rxqs_map)
{
	if (is_rxqs_map) {
		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
	} else {
		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
	}
	static_key_slow_dec_cpuslocked(&xps_needed);
	kfree_rcu(dev_maps, rcu);
}
static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
			   u16 offset, u16 count, bool is_rxqs_map)
{
	bool active = false;
	int i, j;

	for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
	     j < nr_ids;)
		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
					       count);
	if (!active)
		reset_xps_maps(dev, dev_maps, is_rxqs_map);

	if (!is_rxqs_map) {
		for (i = offset + (count - 1); count--; i--) {
			netdev_queue_numa_node_write(
				netdev_get_tx_queue(dev, i),
				NUMA_NO_NODE);
		}
	}
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	const unsigned long *possible_mask = NULL;
	struct xps_dev_maps *dev_maps;
	unsigned int nr_ids;

	if (!static_key_false(&xps_needed))
		return;

	cpus_read_lock();
	mutex_lock(&xps_map_mutex);

	if (static_key_false(&xps_rxqs_needed)) {
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		if (dev_maps) {
			nr_ids = dev->num_rx_queues;
			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
				       offset, count, true);
		}
	}

	dev_maps = xmap_dereference(dev->xps_cpus_map);
	if (!dev_maps)
		goto out_no_maps;

	if (num_possible_cpus() > 1)
		possible_mask = cpumask_bits(cpu_possible_mask);
	nr_ids = nr_cpu_ids;
	clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
		       false);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
	cpus_read_unlock();
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
				      u16 index, bool is_rxqs_map)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
	 * map
	 */
	if (is_rxqs_map)
		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
	else
		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
				       cpu_to_node(attr_index));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
/* Must be called under cpus_read_lock */
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, bool is_rxqs_map)
{
	const unsigned long *online_mask = NULL, *possible_mask = NULL;
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, j, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;
	unsigned int nr_ids;

	if (dev->num_tc) {
		/* Do not allow XPS on subordinate device directly */
		num_tc = dev->num_tc;
		if (num_tc < 0)
			return -EINVAL;

		/* If queue belongs to subordinate dev use its map */
		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	mutex_lock(&xps_map_mutex);
	if (is_rxqs_map) {
		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		nr_ids = dev->num_rx_queues;
	} else {
		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
		if (num_possible_cpus() > 1) {
			online_mask = cpumask_bits(cpu_online_mask);
			possible_mask = cpumask_bits(cpu_possible_mask);
		}
		dev_maps = xmap_dereference(dev->xps_cpus_map);
		nr_ids = nr_cpu_ids;
	}

	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	/* allocate memory for queue storage */
	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
	     j < nr_ids;) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = j * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
				 NULL;

		map = expand_xps_map(map, j, index, is_rxqs_map);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	if (!dev_maps) {
		/* Increment static keys at most once per type */
		static_key_slow_inc_cpuslocked(&xps_needed);
		if (is_rxqs_map)
			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
	}

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* We need to explicitly update tci as prevous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = j * num_tc + tc;

		if (netif_attr_test_mask(j, mask, nr_ids) &&
		    netif_attr_test_online(j, online_mask, nr_ids)) {
			/* add tx-queue to CPU/rx-queue maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->attr_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (!is_rxqs_map) {
				if (numa_node_id == -2)
					numa_node_id = cpu_to_node(j);
				else if (numa_node_id != cpu_to_node(j))
					numa_node_id = -1;
			}
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}
	}

	if (is_rxqs_map)
		rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
	else
		rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = xmap_dereference(dev_maps->attr_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	if (!is_rxqs_map) {
		/* update Tx queue numa node */
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
					     (numa_node_id >= 0) ?
					     numa_node_id : NUMA_NO_NODE);
	}

	if (!dev_maps)
		goto out_no_maps;

	/* removes tx-queue from unused CPUs/rx-queues */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = tc, tci = j * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!netif_attr_test_mask(j, mask, nr_ids) ||
		    !netif_attr_test_online(j, online_mask, nr_ids))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active)
		reset_xps_maps(dev, dev_maps, is_rxqs_map);

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->attr_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	int ret;

	cpus_read_lock();
	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
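
/* Example: a minimal sketch of a driver pinning each Tx queue to the
 * CPU with the same index at probe time (the queue/CPU layout is
 * hypothetical, error handling is elided):
 *
 *	int qid;
 *
 *	for (qid = 0; qid < dev->real_num_tx_queues; qid++)
 *		netif_set_xps_queue(dev, cpumask_of(qid), qid);
 *
 * The same mapping can be set from user space via
 * /sys/class/net/<iface>/queues/tx-<n>/xps_cpus.
 */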
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

	/* Unbind any subordinate channels */
	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev)
			netdev_unbind_sb_channel(dev, txq->sb_dev);
	}
}
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	/* Reset TC configuration of device */
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
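
/* Example: a minimal sketch of carving 8 Tx queues into two traffic
 * classes (TC0 = queues 0-3, TC1 = queues 4-7) and steering priority 1
 * into TC1, as an mqprio-style driver might do; the split is
 * hypothetical and error handling is elided:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);
 *	netdev_set_tc_queue(dev, 1, 4, 4);
 *	netdev_set_prio_tc_map(dev, 1, 1);
 */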
void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(sb_dev, 0);
#endif
	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));

	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev == sb_dev)
			txq->sb_dev = NULL;
	}
}
EXPORT_SYMBOL(netdev_unbind_sb_channel);
int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset)
{
	/* Make certain the sb_dev and dev are already configured */
	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
		return -EINVAL;

	/* We cannot hand out queues we don't have */
	if ((offset + count) > dev->real_num_tx_queues)
		return -EINVAL;

	/* Record the mapping */
	sb_dev->tc_to_txq[tc].count = count;
	sb_dev->tc_to_txq[tc].offset = offset;

	/* Provide a way for Tx queue to find the tc_to_txq map or
	 * XPS map for itself.
	 */
	while (count--)
		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;

	return 0;
}
EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{
	/* Do not use a multiqueue device to represent a subordinate channel */
	if (netif_is_multiqueue(dev))
		return -ENODEV;

	/* We allow channels 1 - 32767 to be used for subordinate channels.
	 * Channel 0 is meant to be "native" mode and used only to represent
	 * the main root device. We allow writing 0 to reset the device back
	 * to normal mode after being used as a subordinate channel.
	 */
	if (channel > S16_MAX)
		return -EINVAL;

	dev->num_tc = -channel;

	return 0;
}
EXPORT_SYMBOL(netdev_set_sb_channel);
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
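
/* Example: a sketch of an ethtool-style reconfiguration shrinking a
 * hypothetical device from 8 to 4 active queue pairs; because the Tx
 * count decreases, the qdisc flush and XPS cleanup above are run:
 *
 *	err = netif_set_real_num_tx_queues(dev, 4);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, 4);
 *
 * Both calls assume the rtnl lock is already held, as it is in the
 * ethtool ->set_channels() path.
 */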
#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}
void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!netif_xmit_stopped(txq)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
/* Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
static u16 skb_tx_hash(const struct net_device *dev,
		       const struct net_device *sb_dev,
		       struct sk_buff *skb)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = dev->real_num_tx_queues;

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = sb_dev->tc_to_txq[tc].offset;
		qcount = sb_dev->tc_to_txq[tc].count;
	}

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= qcount))
			hash -= qcount;
		return hash + qoffset;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
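
/* Worked example: reciprocal_scale(h, n) computes ((u64)h * n) >> 32,
 * mapping a 32-bit flow hash uniformly onto [0, n) without a division.
 * With a hypothetical hash of 0x80000000 and qcount = 8 this yields
 * (0x80000000ULL * 8) >> 32 = 4, so the packet is steered to queue
 * qoffset + 4.
 */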
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	skb_dump(KERN_WARNING, skb, false);
	WARN(1, "%s: caps=(%pNF, %pNF)\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
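
/* Worked example: for plain TCP over IPv4 with CHECKSUM_PARTIAL, the
 * checksum start sits at the transport header and csum_offset is
 * offsetof(struct tcphdr, check) == 16, so the folded result of
 * skb_checksum() over the region from the transport header to the tail
 * is written 16 bytes into the TCP header, making the skb safe for a
 * device without checksum offload.
 */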
int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb->data;
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		skb_dump(KERN_ERR, skb, true);
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* XXX: check that highmem exists at all on the given machine. */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}
#endif
	return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif
static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	__be16 type;
	int tmp;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
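
/* Example: a minimal sketch of an ndo_features_check implementation in
 * a hypothetical driver whose checksum engine cannot reach transport
 * headers deeper than 128 bytes (the limit is made up for
 * illustration):
 *
 *	static netdev_features_t my_features_check(struct sk_buff *skb,
 *						   struct net_device *dev,
 *						   netdev_features_t features)
 *	{
 *		if (skb->encapsulation &&
 *		    skb_checksum_start_offset(skb) > 128)
 *			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 *		return features;
 *	}
 */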
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (dev_nit_active(dev))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb_mark_not_on_list(skb);
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_tx_queue_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}
int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	skb = validate_xmit_xfrm(skb, features, again);

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);

		/* in case skb wont be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev, again);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, skb_transport_offset(skb),
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, skb_transport_offset(skb),
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
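
/* Worked example, assuming a hypothetical TSO skb with skb->len = 65226,
 * 66 bytes of Ethernet + IPv4 + TCP headers and gso_segs = 45: the wire
 * carries 45 copies of the 66-byte header while skb->len counts only
 * one, so pkt_len = 65226 + (45 - 1) * 66 = 68130. Byte-based qdiscs
 * such as tbf therefore account the true link usage.
 */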
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);

	if (q->flags & TCQ_F_NOLOCK) {
		if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
		    qdisc_run_begin(q)) {
			if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state))) {
				__qdisc_drop(skb, &to_free);
				rc = NET_XMIT_DROP;
				goto end_run;
			}
			qdisc_bstats_cpu_update(q, skb);

			rc = NET_XMIT_SUCCESS;
			if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
				__qdisc_run(q);

end_run:
			qdisc_run_end(q);
		} else {
			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
			qdisc_run(q);
		}

		if (unlikely(to_free))
			kfree_skb_list(to_free);
		return rc;
	}

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}

		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
			qdisc_run_end(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	const struct netprio_map *map;
	const struct sock *sk;
	unsigned int prioidx;

	if (skb->priority)
		return;
	map = rcu_dereference_bh(skb->dev->priomap);
	if (!map)
		return;
	sk = skb_to_full_sk(skb);
	if (!sk)
		return;

	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);

	if (prioidx < map->priomap_len)
		skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
#endif
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
	struct tcf_result cl_res;

	if (!miniq)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	mini_qdisc_bstats_cpu_update(miniq, skb);

	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */
#ifdef CONFIG_XPS
static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
			       struct xps_dev_maps *dev_maps, unsigned int tci)
{
	struct xps_map *map;
	int queue_index = -1;

	if (dev->num_tc) {
		tci *= dev->num_tc;
		tci += netdev_get_prio_tc_map(dev, skb->priority);
	}

	map = rcu_dereference(dev_maps->attr_map[tci]);
	if (map) {
		if (map->len == 1)
			queue_index = map->queues[0];
		else
			queue_index = map->queues[reciprocal_scale(
						skb_get_hash(skb), map->len)];
		if (unlikely(queue_index >= dev->real_num_tx_queues))
			queue_index = -1;
	}
	return queue_index;
}
#endif
static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
			 struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct sock *sk = skb->sk;
	int queue_index = -1;

	if (!static_key_false(&xps_needed))
		return -1;

	rcu_read_lock();
	if (!static_key_false(&xps_rxqs_needed))
		goto get_cpus_map;

	dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
	if (dev_maps) {
		int tci = sk_rx_queue_get(sk);

		if (tci >= 0 && tci < dev->num_rx_queues)
			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  tci);
	}

get_cpus_map:
	if (queue_index < 0) {
		dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
		if (dev_maps) {
			unsigned int tci = skb->sender_cpu - 1;

			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  tci);
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
		     struct net_device *sb_dev)
{
	return 0;
}
EXPORT_SYMBOL(dev_pick_tx_zero);

u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
}
EXPORT_SYMBOL(dev_pick_tx_cpu_id);
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
		   struct net_device *sb_dev)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	sb_dev = sb_dev ? : dev;

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, sb_dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, sb_dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(netdev_pick_tx);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
		else
			queue_index = netdev_pick_tx(dev, skb, sb_dev);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
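
/* Example: a minimal sketch of a custom ndo_select_queue in a
 * hypothetical driver that reserves queue 0 for a management ethertype
 * and defers to the core selection otherwise (the 0x88b5 check is
 * illustrative only):
 *
 *	static u16 my_select_queue(struct net_device *dev,
 *				   struct sk_buff *skb,
 *				   struct net_device *sb_dev)
 *	{
 *		if (skb->protocol == htons(0x88b5))
 *			return 0;
 *		return netdev_pick_tx(dev, skb, sb_dev);
 *	}
 */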
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@sb_dev: subordinate device used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;
	bool again = false;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_at_ingress = 0;
# ifdef CONFIG_NET_EGRESS
	if (static_branch_unlikely(&egress_needed_key)) {
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
	}
# endif
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_core_pick_tx(dev, skb, sb_dev);
	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and shoot the lock. It is not prone to deadlocks.
	 * Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {
			if (dev_xmit_recursion())
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev, &again);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				dev_xmit_recursion_inc();
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				dev_xmit_recursion_dec();
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
{
	return __dev_queue_xmit(skb, sb_dev);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
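
/* Example: a minimal sketch of injecting a locally built frame from
 * kernel code; dev, payload, len and dest_mac are hypothetical and
 * only allocation failure is handled:
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (!skb)
 *		return;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	skb_reset_network_header(skb);
 *	skb_put_data(skb, payload, len);
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL,
 *			    skb->len) < 0) {
 *		kfree_skb(skb);
 *		return;
 *	}
 *	dev_queue_xmit(skb);
 */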
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	skb = validate_xmit_skb_list(skb, dev, &again);
	if (skb != orig_skb)
		goto drop;

	skb_set_queue_mapping(skb, queue_id);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}
EXPORT_SYMBOL(dev_direct_xmit);
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
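
/* Note: RPS and RFS are enabled from user space rather than by
 * drivers. Writing a CPU mask (e.g. "f" for CPUs 0-3) to
 * /sys/class/net/<iface>/queues/rx-<n>/rps_cpus turns on RPS for a
 * queue; RFS additionally requires a nonzero
 * net.core.rps_sock_flow_entries plus a per-queue rps_flow_cnt.
 */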
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
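
/* Example: a minimal sketch of the periodic expiry scan the kernel-doc
 * above asks drivers for; the adapter structure, its filter table and
 * the removal helper are hypothetical:
 *
 *	static void my_expire_filters(struct my_adapter *adap, u16 rxq)
 *	{
 *		u32 id;
 *
 *		spin_lock_bh(&adap->filter_lock);
 *		for (id = 0; id < adap->nfilters; id++) {
 *			if (adap->filters[id].in_use &&
 *			    rps_may_expire_flow(adap->netdev, rxq,
 *						adap->filters[id].flow_id,
 *						id))
 *				my_remove_filter(adap, id);
 *		}
 *		spin_unlock_bh(&adap->filter_lock);
 *	}
 */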
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */
/*
 * Check whether this softnet_data structure belongs to another CPU.
 * If yes, queue it to our IPI list and return 1.
 * If no, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_rx_queue *rxqueue;

	rxqueue = dev->_rx;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);

			return rxqueue; /* Return first rxqueue */
		}
		rxqueue += index;
	}
	return rxqueue;
}
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
				     struct xdp_buff *xdp,
				     struct bpf_prog *xdp_prog)
{
	struct netdev_rx_queue *rxqueue;
	void *orig_data, *orig_data_end;
	u32 metalen, act = XDP_DROP;
	__be16 orig_eth_type;
	struct ethhdr *eth;
	bool orig_bcast;
	int hlen, off;
	u32 mac_len;

	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
	if (skb_cloned(skb) || skb_is_tc_redirected(skb))
		return XDP_PASS;

	/* XDP packets must be linear and must have sufficient headroom
	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
	 * native XDP provides, thus we need to do it here as well.
	 */
	if (skb_is_nonlinear(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
		int troom = skb->tail + skb->data_len - skb->end;

		/* In case we have to go down the path and also linearize,
		 * then lets do the pskb_expand_head() work just once here.
		 */
		if (pskb_expand_head(skb,
				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
			goto do_drop;
		if (skb_linearize(skb))
			goto do_drop;
	}

	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	hlen = skb_headlen(skb) + mac_len;
	xdp->data = skb->data - mac_len;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + hlen;
	xdp->data_hard_start = skb->data - skb_headroom(skb);
	orig_data_end = xdp->data_end;
	orig_data = xdp->data;
	eth = (struct ethhdr *)xdp->data;
	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
	orig_eth_type = eth->h_proto;

	rxqueue = netif_get_rxqueue(skb);
	xdp->rxq = &rxqueue->xdp_rxq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	/* check if bpf_xdp_adjust_head was used */
	off = xdp->data - orig_data;
	if (off) {
		if (off > 0)
			__skb_pull(skb, off);
		else if (off < 0)
			__skb_push(skb, -off);

		skb->mac_header += off;
		skb_reset_network_header(skb);
	}

	/* check if bpf_xdp_adjust_tail was used. it can only "shrink"
	 * the packet.
	 */
	off = orig_data_end - xdp->data_end;
	if (off != 0) {
		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
		skb->len -= off;
	}

	/* check if XDP changed the eth hdr such that the SKB needs an update */
	eth = (struct ethhdr *)xdp->data;
	if ((orig_eth_type != eth->h_proto) ||
	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
	}

	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
		__skb_push(skb, mac_len);
		break;
	case XDP_PASS:
		metalen = xdp->data - xdp->data_meta;
		if (metalen)
			skb_metadata_set(skb, metalen);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
	do_drop:
		kfree_skb(skb);
		break;
	}

	return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_core_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		kfree_skb(skb);
	}
}
EXPORT_SYMBOL_GPL(generic_xdp_tx);
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);

int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;
		int err;

		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect(skb->dev, skb,
							      &xdp, xdp_prog);
				if (err)
					goto out_redir;
				break;
			case XDP_TX:
				generic_xdp_tx(skb, xdp_prog);
				break;
			}
			return XDP_DROP;
		}
	}
	return XDP_PASS;
out_redir:
	kfree_skb(skb);
	return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);

#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int ret;

	trace_netif_rx_entry(skb);

	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_rx);
4667 int netif_rx_ni(struct sk_buff
*skb
)
4671 trace_netif_rx_ni_entry(skb
);
4674 err
= netif_rx_internal(skb
);
4675 if (local_softirq_pending())
4678 trace_netif_rx_ni_exit(err
);
4682 EXPORT_SYMBOL(netif_rx_ni
);
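
/* Illustrative, hypothetical sketch (not part of this file): how a non-NAPI
 * driver would hand a received frame to the stack from its interrupt
 * handler via netif_rx(). example_isr_rx() and its buffer arguments are
 * assumed driver-side names.
 */
#if 0 /* sketch only, not compiled */
static void example_isr_rx(struct net_device *dev, const void *buf, int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);	/* enqueues to the per-CPU backlog; irq-safe */
}
#endif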
static __latent_entropy void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(refcount_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);

			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
				__kfree_skb(skb);
			else
				__kfree_skb_defer(skb);
		}

		__kfree_skb_flush();
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock = NULL;

			head = head->next_sched;

			if (!(q->flags & TCQ_F_NOLOCK)) {
				root_lock = qdisc_lock(q);
				spin_lock(root_lock);
			}
			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
			smp_mb__before_atomic();
			clear_bit(__QDISC_STATE_SCHED, &q->state);
			qdisc_run(q);
			if (root_lock)
				spin_unlock(root_lock);
		}
	}

	xfrm_dev_backlog(sd);
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

static inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
	struct tcf_result cl_res;

	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
	if (!miniq)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_at_ingress = 1;
	mini_qdisc_bstats_cpu_update(miniq, skb);

	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
		__skb_push(skb, skb->mac_len);
		skb_do_redirect(skb);
		return NULL;
	case TC_ACT_CONSUMED:
		return NULL;
	default:
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return skb;
}
/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	if (dev->priv_flags & IFF_NO_RX_HANDLER)
		return -EINVAL;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
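
/* Illustrative, hypothetical sketch (not part of this file): registering a
 * receive handler, as bridge/bonding/team do. example_rx_handler() and
 * example_consume() are assumed names.
 */
#if 0 /* sketch only, not compiled */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *priv = rcu_dereference(skb->dev->rx_handler_data);

	/* steal frames of one ethertype, pass the rest on unchanged */
	if (skb->protocol == htons(0x88b5)) {	/* IEEE 802 local experimental */
		example_consume(priv, skb);
		return RX_HANDLER_CONSUMED;
	}
	return RX_HANDLER_PASS;
}

/* under rtnl_lock(), per the comment above:
 *	err = netdev_rx_handler_register(dev, example_rx_handler, priv);
 */
#endif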
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
				    struct packet_type **ppt_prev)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (static_branch_unlikely(&generic_xdp_needed_key)) {
		int ret2;

		preempt_disable();
		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		preempt_enable();

		if (ret2 != XDP_PASS)
			return NET_RX_DROP;
		skb_reset_mac_len(skb);
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_branch_unlikely(&ingress_needed_key)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
	skb_reset_tc(skb);
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fall through */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
check_vlan_id:
		if (skb_vlan_tag_get_id(skb)) {
			/* Vlan id is non 0 and vlan_do_receive() above couldn't
			 * find vlan device.
			 */
			skb->pkt_type = PACKET_OTHERHOST;
		} else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
			   skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
			/* Outer header is 802.1P with vlan 0, inner header is
			 * 802.1Q or 802.1AD and vlan_do_receive() above could
			 * not find vlan dev for vlan id 0.
			 */
			__vlan_hwaccel_clear_tag(skb);
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				goto out;
			if (vlan_do_receive(&skb))
				/* After stripping off 802.1P header with vlan 0
				 * vlan dev is found for inner header.
				 */
				goto another_round;
			else if (unlikely(!skb))
				goto out;
			else
				/* We have stripped outer 802.1P vlan 0 header.
				 * But could not find vlan dev.
				 * check again for vlan id to set OTHERHOST.
				 */
				goto check_vlan_id;
		}
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		__vlan_hwaccel_clear_tag(skb);
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
			goto drop;
		*ppt_prev = pt_prev;
	} else {
drop:
		if (!deliver_exact)
			atomic_long_inc(&skb->dev->rx_dropped);
		else
			atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}
static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct net_device *orig_dev = skb->dev;
	struct packet_type *pt_prev = NULL;
	int ret;

	ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
	if (pt_prev)
		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
					 skb->dev, pt_prev, orig_dev);
	return ret;
}

/**
 *	netif_receive_skb_core - special purpose version of netif_receive_skb
 *	@skb: buffer to process
 *
 *	More direct receive version of netif_receive_skb().  It should
 *	only be used by callers that have a need to skip RPS and Generic XDP.
 *	Caller must also take care of handling if (page_is_)pfmemalloc.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb_core(struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __netif_receive_skb_one_core(skb, false);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb_core);
static inline void __netif_receive_skb_list_ptype(struct list_head *head,
						  struct packet_type *pt_prev,
						  struct net_device *orig_dev)
{
	struct sk_buff *skb, *next;

	if (!pt_prev)
		return;
	if (list_empty(head))
		return;
	if (pt_prev->list_func != NULL)
		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
				   ip_list_rcv, head, pt_prev, orig_dev);
	else
		list_for_each_entry_safe(skb, next, head, list) {
			skb_list_del_init(skb);
			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
		}
}

static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{
	/* Fast-path assumptions:
	 * - There is no RX handler.
	 * - Only one packet_type matches.
	 * If either of these fails, we will end up doing some per-packet
	 * processing in-line, then handling the 'last ptype' for the whole
	 * sublist.  This can't cause out-of-order delivery to any single ptype,
	 * because the 'last ptype' must be constant across the sublist, and all
	 * other ptypes are handled per-packet.
	 */
	/* Current (common) ptype of sublist */
	struct packet_type *pt_curr = NULL;
	/* Current (common) orig_dev of sublist */
	struct net_device *od_curr = NULL;
	struct list_head sublist;
	struct sk_buff *skb, *next;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *orig_dev = skb->dev;
		struct packet_type *pt_prev = NULL;

		skb_list_del_init(skb);
		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
		if (!pt_prev)
			continue;
		if (pt_curr != pt_prev || od_curr != orig_dev) {
			/* dispatch old sublist */
			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			pt_curr = pt_prev;
			od_curr = orig_dev;
		}
		list_add_tail(&skb->list, &sublist);
	}

	/* dispatch final sublist */
	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_one_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_one_core(skb, false);

	return ret;
}

static void __netif_receive_skb_list(struct list_head *head)
{
	unsigned long noreclaim_flag = 0;
	struct sk_buff *skb, *next;
	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */

	list_for_each_entry_safe(skb, next, head, list) {
		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
			struct list_head sublist;

			/* Handle the previous sublist */
			list_cut_before(&sublist, head, &skb->list);
			if (!list_empty(&sublist))
				__netif_receive_skb_list_core(&sublist, pfmemalloc);
			pfmemalloc = !pfmemalloc;
			/* See comments in __netif_receive_skb */
			if (pfmemalloc)
				noreclaim_flag = memalloc_noreclaim_save();
			else
				memalloc_noreclaim_restore(noreclaim_flag);
		}
	}
	/* Handle the remaining sublist */
	if (!list_empty(head))
		__netif_receive_skb_list_core(head, pfmemalloc);
	/* Restore pflags */
	if (pfmemalloc)
		memalloc_noreclaim_restore(noreclaim_flag);
}
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_branch_dec(&generic_xdp_needed_key);
		} else if (new && !old) {
			static_branch_inc(&generic_xdp_needed_key);
			dev_disable_lro(dev);
			dev_disable_gro_hw(dev);
		}
		break;

	case XDP_QUERY_PROG:
		xdp->prog_id = old ? old->aux->id : 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}

static void netif_receive_skb_list_internal(struct list_head *head)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		net_timestamp_check(netdev_tstamp_prequeue, skb);
		skb_list_del_init(skb);
		if (!skb_defer_rx_timestamp(skb))
			list_add_tail(&skb->list, &sublist);
	}
	list_splice_init(&sublist, head);

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		list_for_each_entry_safe(skb, next, head, list) {
			struct rps_dev_flow voidflow, *rflow = &voidflow;
			int cpu = get_rps_cpu(skb->dev, skb, &rflow);

			if (cpu >= 0) {
				/* Will be handled, remove from list */
				skb_list_del_init(skb);
				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			}
		}
	}
#endif
	__netif_receive_skb_list(head);
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	trace_netif_receive_skb_entry(skb);

	ret = netif_receive_skb_internal(skb);
	trace_netif_receive_skb_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);

/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return;
	if (trace_netif_receive_skb_list_entry_enabled()) {
		list_for_each_entry(skb, head, list)
			trace_netif_receive_skb_list_entry(skb);
	}
	netif_receive_skb_list_internal(head);
	trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
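
/* Illustrative, hypothetical sketch (not part of this file): batching frames
 * on a list so netif_receive_skb_list() can amortize per-packet costs.
 * example_deliver_batch() is an assumed name.
 */
#if 0 /* sketch only, not compiled */
static void example_deliver_batch(struct sk_buff *pkts[], int n)
{
	LIST_HEAD(head);
	int i;

	for (i = 0; i < n; i++)
		list_add_tail(&pkts[i]->list, &head);

	netif_receive_skb_list(&head);	/* consumes every skb on the list */
}
#endif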
DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();
	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);
	local_irq_enable();

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	local_bh_enable();
}

static void flush_all_backlogs(void)
{
	unsigned int cpu;

	get_online_cpus();

	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
static struct list_head *gro_list_prepare(struct napi_struct *napi,
					  struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct list_head *head;
	struct sk_buff *p;

	head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}

	return head;
}
static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
static void gro_flush_oldest(struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(oldest);
}
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							    struct sk_buff *));
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *gro_head;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_head = gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
					ipv6_gro_receive, inet_gro_receive,
					gro_head, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(pp);
		napi->gro_hash[hash].count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
		gro_flush_oldest(gro_head);
	} else {
		napi->gro_hash[hash].count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, gro_head);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (napi->gro_hash[hash].count) {
		if (!test_bit(hash, &napi->gro_bitmask))
			__set_bit(hash, &napi->gro_bitmask);
	} else if (test_bit(hash, &napi->gro_bitmask)) {
		__clear_bit(hash, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	skb_ext_put(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
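
/* Illustrative, hypothetical sketch (not part of this file): the typical
 * shape of a driver NAPI poll callback feeding received frames through
 * napi_gro_receive(). "struct example_ring", example_ring_next_skb() and
 * example_enable_rx_irq() are assumed driver-side names.
 */
#if 0 /* sketch only, not compiled */
struct example_ring {
	struct napi_struct napi;
	/* ... device descriptor ring state ... */
};

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *ring = container_of(napi, struct example_ring, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = example_ring_next_skb(ring);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);	/* try to coalesce, then deliver */
		work++;
	}

	/* under budget means the ring is drained: complete and re-enable IRQs */
	if (work < budget && napi_complete_done(napi, work))
		example_enable_rx_irq(ring);

	return work;
}
#endif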
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	skb_ext_reset(skb);

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */
static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
{
	list_add_tail(&skb->list, &napi->rx_list);
	if (++napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}
static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb);
		break;

	case GRO_DROP:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}
/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending ipi, its better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = dev_rx_weight;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
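
/* A note on the branchless update above (illustrative, not part of this
 * file): for a power-of-two flag F, (val & F) / F evaluates to 1 when the
 * flag is set and 0 otherwise, so multiplying by NAPIF_STATE_MISSED sets
 * MISSED exactly when SCHED was already set, without a conditional branch.
 * A minimal equivalent sketch:
 */
#if 0 /* sketch only, not compiled */
static unsigned long set_missed_if_sched(unsigned long val)
{
	/* same as: if (val & NAPIF_STATE_SCHED) val |= NAPIF_STATE_MISSED; */
	return val | ((val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
		      NAPIF_STATE_MISSED);
}
#endif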
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (n->gro_bitmask) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to bound somehow the time packets are kept in
		 * the GRO layer
		 */
		napi_gro_flush(n, !!timeout);
		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
	}
	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we dont take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	/* We can't gro_normal_list() here, because napi->poll() might have
	 * rearmed the napi (napi_complete_done()) in which case it could
	 * already be running on another CPU.
	 */
	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET) {
		/* As the whole budget was spent, we still own the napi so can
		 * safely handle the rx_list.
		 */
		gro_normal_list(napi);
		__napi_schedule(napi);
	}
	local_bh_enable();
}

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
		gro_normal_list(napi);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */
static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_bitmask && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}
static void init_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		INIT_LIST_HEAD(&napi->gro_hash[i].list);
		napi->gro_hash[i].count = 0;
	}
	napi->gro_bitmask = 0;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	init_gro_hash(napi);
	napi->skb = NULL;
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
				weight);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
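
/* Illustrative, hypothetical sketch (not part of this file): wiring up a
 * NAPI instance at probe time. struct example_priv and example_napi_poll
 * (sketched earlier next to napi_gro_receive()) are assumed names.
 */
#if 0 /* sketch only, not compiled */
struct example_priv {
	struct napi_struct napi;
};

static void example_setup_napi(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_napi_add(dev, &priv->napi, example_napi_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);	/* usually done later, in ndo_open */
}
#endif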
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);
static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	flush_gro_hash(napi);
	napi->gro_bitmask = 0;
}
EXPORT_SYMBOL(netif_napi_del);
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	gro_normal_list(n);

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};
static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}
void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);
/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev_rcu(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
		/* first is the upper device itself */
		ret = fn(udev, data);
		if (ret)
			return ret;

		/* then look at all of its upper devices */
		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
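
/* Illustrative, hypothetical sketch (not part of this file): a walker
 * callback counting every device stacked above a given one. Both
 * example_* helpers are assumed names.
 */
#if 0 /* sketch only, not compiled */
static int example_count_uppers(struct net_device *upper, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* returning non-zero stops the walk early */
}

/* caller holds rcu_read_lock() */
static int example_upper_count(struct net_device *dev)
{
	int count = 0;

	netdev_walk_all_upper_dev_rcu(dev, example_count_uppers, &count);
	return count;
}
#endif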
/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold either the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);
static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU
 *					variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}
static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);
		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}
static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}
static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
			.extack = extack,
		},
		.upper_dev = upper_dev,
		.master = master,
		.linking = true,
		.upper_info = upper_info,
	};
	struct net_device *master_dev;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (!master) {
		if (netdev_has_upper_dev(dev, upper_dev))
			return -EEXIST;
	} else {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			return master_dev == upper_dev ? -EEXIST : -EBUSY;
	}

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
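
/* Example: a minimal sketch of the link/unlink pairing a VLAN-style upper
 * device would use; the example_* helper is hypothetical. The lower device
 * is the first argument, the new upper device the second, RTNL held.
 */
static inline int example_stack_upper(struct net_device *real_dev,
				      struct net_device *vlan_dev,
				      struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();
	err = netdev_upper_dev_link(real_dev, vlan_dev, extack);
	if (err)
		return err;

	/* ... on teardown the driver undoes this with:
	 * netdev_upper_dev_unlink(real_dev, vlan_dev);
	 */
	return 0;
}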
/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, extack);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
		},
		.upper_dev = upper_dev,
		.linking = false,
	};

	ASSERT_RTNL();

	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info = {
		.info.dev = dev,
	};

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);
static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev)
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = 0;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower);
		if (max_nest < nest)
			max_nest = nest;
	}

	return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);
/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
		.info.dev = lower_dev,
	};

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);
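
/* Example: a minimal sketch of how a bonding-style master might publish a
 * slave's state to listeners on the notifier chain. The example_slave_state
 * structure and helper are hypothetical driver-private pieces, not kernel
 * types; real users pass e.g. a struct netdev_lag_lower_state_info.
 */
struct example_slave_state {
	bool link_up;
};

static inline void example_publish_slave_state(struct net_device *slave,
					       struct example_slave_state *st)
{
	ASSERT_RTNL();
	netdev_lower_state_changed(slave, st);
}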
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(audit_context(), GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
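
/* Example: a minimal sketch of the balanced increment/decrement pattern a
 * packet-capture style caller follows (RTNL held). The example_* names are
 * hypothetical; the point is that every +1 must later be matched by a -1 so
 * the device leaves promiscuous mode when the last user goes away.
 */
static inline int example_capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* count up */
}

static inline void example_capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* count back down */
}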
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all interfaces. Once it hits zero the device reverts back to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {
		if (old_flags & IFF_UP)
			__dev_close(dev);
		else
			ret = __dev_open(dev, extack);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC, when
	 * IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info = {
			.info = {
				.dev = dev,
			},
			.flags_changed = changes,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
	}
}
/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *	@extack: netlink extended ack
 *
 *	Change settings on device based state flags. The flags are
 *	in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags, extack);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
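
/* Example: a minimal sketch of toggling IFF_UP the way the ioctl/netlink
 * paths do it: read the userspace-visible flags, modify, write back.
 * RTNL must be held by the caller; the example_* name is hypothetical.
 */
static inline int example_bring_up(struct net_device *dev,
				   struct netlink_ext_ack *extack)
{
	unsigned int flags = dev_get_flags(dev) | IFF_UP;

	ASSERT_RTNL();
	return dev_change_flags(dev, flags, extack);
}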
int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);
/**
 *	dev_set_mtu_ext - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *	@extack: netlink extended ack
 *
 *	Change the maximum transfer size of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						   orig_mtu);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						     new_mtu);
		}
	}
	return err;
}
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	struct netlink_ext_ack extack;
	int err;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
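
/* Example: a minimal sketch of a caller clamping and applying a new MTU;
 * dev_set_mtu() already range-checks against min_mtu/max_mtu and logs the
 * extack message on failure. The 9000-byte jumbo cap and the example_*
 * name are arbitrary illustrative choices, not values from this file.
 */
static inline int example_set_jumbo_mtu(struct net_device *dev, int mtu)
{
	if (mtu > 9000)
		mtu = 9000;
	return dev_set_mtu(dev, mtu);
}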
/**
 *	dev_change_tx_queue_len - Change TX queue length of a netdevice
 *	@dev: device
 *	@new_len: new tx queue length
 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)
		return -ERANGE;

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res)
			goto err_rollback;
		res = dev_qdisc_change_tx_queue_len(dev);
		if (res)
			goto err_rollback;
	}

	return 0;

err_rollback:
	netdev_err(dev, "refused to change device tx_queue_len\n");
	dev->tx_queue_len = orig_len;
	return res;
}

/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);
/**
 *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 *	@dev: device
 *	@addr: new address
 *	@extack: netlink extended ack
 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
{
	struct netdev_notifier_pre_changeaddr_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.dev_addr = addr,
	};
	int rc;

	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
	return notifier_to_errno(rc);
}
EXPORT_SYMBOL(dev_pre_changeaddr_notify);
/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *	@extack: netlink extended ack
 *
 *	Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
	if (err)
		return err;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
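
/* Example: a minimal sketch of changing an Ethernet MAC from kernel code.
 * The address bytes come from the caller; sa_family must match dev->type
 * and the length must fit sa_data (it does for Ethernet's 6 bytes). The
 * example_* helper is hypothetical.
 */
static inline int example_set_mac(struct net_device *dev, const u8 *mac,
				  struct netlink_ext_ack *extack)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa, extack);
}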
/**
 *	dev_change_carrier - Change device carrier
 *	@dev: device
 *	@new_carrier: new value
 *
 *	Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 *	dev_get_phys_port_id - Get device physical port ID
 *	@dev: device
 *	@ppid: port ID
 *
 *	Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);
/**
 *	dev_get_phys_port_name - Get device physical port name
 *	@dev: device
 *	@name: port name
 *	@len: limit of bytes to copy to name
 *
 *	Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (ops->ndo_get_phys_port_name) {
		err = ops->ndo_get_phys_port_name(dev, name, len);
		if (err != -EOPNOTSUPP)
			return err;
	}
	return devlink_compat_phys_port_name_get(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
 *	dev_get_port_parent_id - Get the device's port parent identifier
 *	@dev: network device
 *	@ppid: pointer to a storage for the port's parent identifier
 *	@recurse: allow/disallow recursion to lower devices
 *
 *	Get the devices's port parent identifier
 */
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid,
			   bool recurse)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netdev_phys_item_id first = { };
	struct net_device *lower_dev;
	struct list_head *iter;
	int err;

	if (ops->ndo_get_port_parent_id) {
		err = ops->ndo_get_port_parent_id(dev, ppid);
		if (err != -EOPNOTSUPP)
			return err;
	}

	err = devlink_compat_switch_id_get(dev, ppid);
	if (!err || err != -EOPNOTSUPP)
		return err;

	if (!recurse)
		return -EOPNOTSUPP;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
		if (err)
			break;
		if (!first.id_len)
			first = *ppid;
		else if (memcmp(&first, ppid, sizeof(*ppid)))
			return -EOPNOTSUPP;
	}

	return err;
}
EXPORT_SYMBOL(dev_get_port_parent_id);
/**
 *	netdev_port_same_parent_id - Indicate if two network devices have
 *	the same port parent identifier
 *	@a: first network device
 *	@b: second network device
 */
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id a_id = { };
	struct netdev_phys_item_id b_id = { };

	if (dev_get_port_parent_id(a, &a_id, true) ||
	    dev_get_port_parent_id(b, &b_id, true))
		return false;

	return netdev_phys_item_id_same(&a_id, &b_id);
}
EXPORT_SYMBOL(netdev_port_same_parent_id);
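
/* Example: a minimal sketch of the switchdev-style check a bridge might do
 * before offloading: two ports belong to the same switch only if their
 * parent IDs match byte for byte. The example_* helper is hypothetical and
 * mirrors what netdev_port_same_parent_id() does above.
 */
static inline bool example_same_switch(struct net_device *a,
				       struct net_device *b)
{
	struct netdev_phys_item_id a_id = { };
	struct netdev_phys_item_id b_id = { };

	if (dev_get_port_parent_id(a, &a_id, true) ||
	    dev_get_port_parent_id(b, &b_id, true))
		return false;	/* not a switch port, or no ID exposed */

	return a_id.id_len == b_id.id_len &&
	       !memcmp(a_id.id, b_id.id, a_id.id_len);
}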
/**
 *	dev_change_proto_down - update protocol port state information
 *	@dev: device
 *	@proto_down: new value
 *
 *	This info can be used by switch drivers to set the phys state of the
 *	port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

/**
 *	dev_change_proto_down_generic - generic implementation for
 *	ndo_change_proto_down that sets carrier according to
 *	proto_down.
 *
 *	@dev: device
 *	@proto_down: new value
 */
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
{
	if (proto_down)
		netif_carrier_off(dev);
	else
		netif_carrier_on(dev);
	dev->proto_down = proto_down;
	return 0;
}
EXPORT_SYMBOL(dev_change_proto_down_generic);
u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
		    enum bpf_netdev_command cmd)
{
	struct netdev_bpf xdp;

	if (!bpf_op)
		return 0;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = cmd;

	/* Query must always succeed. */
	WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);

	return xdp.prog_id;
}

static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
			   struct netlink_ext_ack *extack, u32 flags,
			   struct bpf_prog *prog)
{
	struct netdev_bpf xdp;

	memset(&xdp, 0, sizeof(xdp));
	if (flags & XDP_FLAGS_HW_MODE)
		xdp.command = XDP_SETUP_PROG_HW;
	else
		xdp.command = XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	return bpf_op(dev, &xdp);
}

static void dev_xdp_uninstall(struct net_device *dev)
{
	struct netdev_bpf xdp;
	bpf_op_t ndo_bpf;

	/* Remove generic XDP */
	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));

	/* Remove from the driver */
	ndo_bpf = dev->netdev_ops->ndo_bpf;
	if (!ndo_bpf)
		return;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG;
	WARN_ON(ndo_bpf(dev, &xdp));
	if (xdp.prog_id)
		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
					NULL));

	/* Remove HW offload */
	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG_HW;
	if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
					NULL));
}
/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	enum bpf_netdev_command query;
	struct bpf_prog *prog = NULL;
	bpf_op_t bpf_op, bpf_chk;
	bool offload;
	int err;

	ASSERT_RTNL();

	offload = flags & XDP_FLAGS_HW_MODE;
	query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;

	bpf_op = bpf_chk = ops->ndo_bpf;
	if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
		NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
		return -EOPNOTSUPP;
	}
	if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
		bpf_op = generic_xdp_install;
	if (bpf_op == bpf_chk)
		bpf_chk = generic_xdp_install;

	if (fd >= 0) {
		u32 prog_id;

		if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
			NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
			return -EEXIST;
		}

		prog_id = __dev_xdp_query(dev, bpf_op, query);
		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
			NL_SET_ERR_MSG(extack, "XDP program already attached");
			return -EBUSY;
		}

		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
					     bpf_op == ops->ndo_bpf);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
			NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
			bpf_prog_put(prog);
			return -EINVAL;
		}

		if (prog->aux->id == prog_id) {
			bpf_prog_put(prog);
			return 0;
		}
	} else {
		if (!__dev_xdp_query(dev, bpf_op, query))
			return 0;
	}

	err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
	if (err < 0 && prog)
		bpf_prog_put(prog);

	return err;
}
/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregisteration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		dev_xdp_uninstall(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		netdev_name_node_alt_flush(dev);
		netdev_name_node_free(dev->name_node);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
		}
	}
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware.  If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	return features;
}
int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
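
/* Example: a minimal sketch of a driver withdrawing a feature at runtime
 * (say, after detecting broken hardware checksums) and letting the core
 * re-evaluate dependencies and notify. Whether a driver clears hw_features,
 * wanted_features, or both is driver policy; this is one plausible pattern,
 * and the example_* name is hypothetical.
 */
static inline void example_disable_rxcsum(struct net_device *dev)
{
	ASSERT_RTNL();
	dev->hw_features &= ~NETIF_F_RXCSUM;
	dev->wanted_features &= ~NETIF_F_RXCSUM;
	netdev_update_features(dev);	/* notifies only if something changed */
}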
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);
	int err = 0;

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++) {
		rx[i].dev = dev;

		/* XDP RX-queue setup */
		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
		if (err < 0)
			goto err_rxq_info;
	}
	return 0;

err_rxq_info:
	/* Rollback successful reg's and free other resources */
	while (i--)
		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
	kvfree(dev->_rx);
	dev->_rx = NULL;
	return err;
}

static void netif_free_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;

	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
	if (!dev->_rx)
		return;

	for (i = 0; i < count; i++)
		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

	kvfree(dev->_rx);
}
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
		     NETDEV_FEATURE_COUNT);
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	ret = -ENOMEM;
	dev->name_node = netdev_name_node_head_alloc(dev);
	if (!dev->name_node)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto err_free_name;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->netdev_ops->ndo_udp_tunnel_add) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		rcu_barrier();

		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_free_name:
	netdev_name_node_free(dev->name_node);
err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initialize the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note : We dont allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' dont need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	if (rtnl_lock_killable())
		return -EINTR;
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
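
/* Example: a minimal sketch of the register half of a typical probe path;
 * the example_* name is hypothetical. A device that was never successfully
 * registered is released with free_netdev() directly, while a registered
 * one must go through unregister_netdev() first.
 */
static inline int example_probe_register(struct net_device *dev)
{
	int err = register_netdev(dev);	/* takes rtnl for us */

	if (err)
		free_netdev(dev);	/* never registered: free directly */
	return err;
}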
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
#if IS_ENABLED(CONFIG_DECNET)
		WARN_ON(dev->dn_ptr);
#endif
		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + sizeof(*netdev_stats), 0,
	       sizeof(*stats64) - sizeof(*netdev_stats));
#else
	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
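
/* Example: a minimal sketch of a caller snapshotting device statistics.
 * dev_get_stats() fills and returns @storage, so the result can be used
 * immediately; the example_* name is hypothetical.
 */
static inline void example_log_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx_packets=%llu\n",
		    (unsigned long long)stats.rx_packets);
}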
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}
9269 * alloc_netdev_mqs - allocate network device
9270 * @sizeof_priv: size of private data to allocate space for
9271 * @name: device name format string
9272 * @name_assign_type: origin of device name
9273 * @setup: callback to initialize device
9274 * @txqs: the number of TX subqueues to allocate
9275 * @rxqs: the number of RX subqueues to allocate
9277 * Allocates a struct net_device with private data area for driver use
9278 * and performs basic initialization. Also allocates subqueue structs
9279 * for each queue on the device.
9281 struct net_device
*alloc_netdev_mqs(int sizeof_priv
, const char *name
,
9282 unsigned char name_assign_type
,
9283 void (*setup
)(struct net_device
*),
9284 unsigned int txqs
, unsigned int rxqs
)
	struct net_device *dev;
	unsigned int alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
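
/*
 * Illustrative sketch, not part of the original file: a hypothetical Ethernet
 * driver allocating a device with four TX and four RX queues. ether_setup()
 * is the usual @setup callback for Ethernet-class devices; the "%d" in the
 * name is expanded when the device is registered. "struct example_priv" is
 * made up for the example.
 */
#if 0	/* example only, not built */
struct example_priv {
	int example_state;
};

static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "eth%d",
				NET_NAME_ENUM, ether_setup, 4, 4);
}
#endif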
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released. If this
 *	is the last reference then it will be freed.
 *	Must be called in process context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();
	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
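
/*
 * Illustrative sketch, not part of the original file: the conventional
 * teardown order in a driver remove path. free_netdev() must only be called
 * once the device has been unregistered (or was never registered at all).
 */
#if 0	/* example only, not built */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes the rtnl semaphore itself */
	free_netdev(dev);	/* drops the final reference */
}
#endif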
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
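
/*
 * Illustrative sketch, not part of the original file: the classic use of
 * synchronize_net() when retiring a packet handler, mirroring what
 * dev_remove_pack() does internally.
 */
#if 0	/* example only, not built */
static void example_retire_handler(struct packet_type *pt)
{
	__dev_remove_pack(pt);	/* unlink; receivers may still be running */
	synchronize_net();	/* wait for in-flight receive paths to finish */
	/* now safe to free anything pt->func was using */
}
#endif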
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *  Note: As most callers use a stack allocated list_head,
 *  we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
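
/*
 * Illustrative sketch, not part of the original file: queueing several
 * devices and tearing them down in one batch under a single rtnl section,
 * which is how rtnl_link_ops->dellink() implementations typically operate.
 */
#if 0	/* example only, not built */
static void example_batch_unregister(struct net_device *a,
				     struct net_device *b)
{
	LIST_HEAD(kill_list);

	rtnl_lock();
	unregister_netdevice_queue(a, &kill_list);
	unregister_netdevice_queue(b, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}
#endif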
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err, new_nsid, new_ifindex;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_get_valid_name(net, dev, pat);
		if (err < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net);
	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		new_ifindex = dev_new_index(net);
	else
		new_ifindex = dev->ifindex;

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
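
/*
 * Illustrative sketch, not part of the original file: moving a device into
 * another namespace with a "dev%d" fallback pattern for name collisions,
 * holding the rtnl semaphore as the kernel-doc above requires.
 */
#if 0	/* example only, not built */
static int example_move_to_ns(struct net_device *dev, struct net *dst)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, dst, "dev%d");
	rtnl_unlock();
	return err;
}
#endif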
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
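
/*
 * Illustrative sketch, not part of the original file: how a master device
 * could fold lower devices' feature sets together, seeding with
 * NETIF_F_ALL_FOR_ALL the way the bonding and bridge drivers do. Iterating
 * every netdev in @net is a stand-in for walking a real lower-device list.
 */
#if 0	/* example only, not built */
static netdev_features_t example_compute_features(struct net *net)
{
	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
	struct net_device *lower;

	for_each_netdev(net, lower)
		features = netdev_increment_features(features,
						     lower->features,
						     NETIF_F_ONE_FOR_ALL);
	return features;
}
#endif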
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));

	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);
define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
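
/*
 * Illustrative sketch, not part of the original file: the level wrappers
 * generated above prefix messages with driver, bus id and interface name,
 * so drivers can log without assembling that context by hand.
 */
#if 0	/* example only, not built */
static void example_report_link(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link down\n");
}
#endif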
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (__dev_get_by_name(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/* This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, maintain this invariant by keeping the
	 * loopback device as the first device on the list of network
	 * devices. This ensures the loopback device is the first device
	 * that appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);