/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not tell that the packet is
 *	cloned and must be copied-on-write; it would change the packet
 *	in place and subsequent readers would see a broken packet.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: the packet
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. This API allows the
 *	caller to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;
	memcpy(dev->ifalias, alias, len);
	dev->ifalias[len] = 0;

	return len;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}
void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
1551 * Register a notifier to be called when network device events occur.
1552 * The notifier passed is linked into the kernel structures and must
1553 * not be reused until it has been unregistered. A negative errno code
1554 * is returned on a failure.
1556 * When registered all registration and up events are replayed
1557 * to the new notifier to allow device to have a race free
1558 * view of the network device list.
1561 int register_netdevice_notifier(struct notifier_block
*nb
)
1563 struct net_device
*dev
;
1564 struct net_device
*last
;
1569 err
= raw_notifier_chain_register(&netdev_chain
, nb
);
1575 for_each_netdev(net
, dev
) {
1576 err
= call_netdevice_notifier(nb
, NETDEV_REGISTER
, dev
);
1577 err
= notifier_to_errno(err
);
1581 if (!(dev
->flags
& IFF_UP
))
1584 call_netdevice_notifier(nb
, NETDEV_UP
, dev
);
1595 for_each_netdev(net
, dev
) {
1599 if (dev
->flags
& IFF_UP
) {
1600 call_netdevice_notifier(nb
, NETDEV_GOING_DOWN
,
1602 call_netdevice_notifier(nb
, NETDEV_DOWN
, dev
);
1604 call_netdevice_notifier(nb
, NETDEV_UNREGISTER
, dev
);
1609 raw_notifier_chain_unregister(&netdev_chain
, nb
);
1612 EXPORT_SYMBOL(register_netdevice_notifier
);
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_key_enable(&netstamp_needed);
	else
		static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_dec(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}
);
2018 #define xmap_dereference(P) \
2019 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
*dev
,
2050 struct xps_dev_maps
*dev_maps
,
2051 int cpu
, u16 offset
, u16 count
)
2053 int num_tc
= dev
->num_tc
? : 1;
2054 bool active
= false;
2057 for (tci
= cpu
* num_tc
; num_tc
--; tci
++) {
2060 for (i
= count
, j
= offset
; i
--; j
++) {
2061 if (!remove_xps_queue(dev_maps
, cpu
, j
))
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu)
		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
					       offset, count);

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = offset + (count - 1); count--; i--)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, cpu, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_cpu_and(cpu, cpu_online_mask, mask) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = cpu * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* We need to explicitly update tci as prevous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = cpu * num_tc + tc;

		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->cpu_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
#endif
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
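
/*
 * Illustrative sketch (not part of this file): how a hypothetical driver
 * might partition eight TX queues into two traffic classes at setup time
 * with the two helpers above. The queue split and the foo_setup_tc() name
 * are assumptions for the example only.
 */
static int foo_setup_tc(struct net_device *dev)
{
	int err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	/* TC 0: queues 0-3, TC 1: queues 4-7 */
	err = netdev_set_tc_queue(dev, 0, 4, 0);
	if (!err)
		err = netdev_set_tc_queue(dev, 1, 4, 4);
	return err;
}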
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
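
/*
 * Illustrative sketch (not part of this file): a hypothetical probe path
 * that caps a device's channels with the default RSS heuristic above and
 * then publishes the active queue counts. The hw_max parameter and the
 * bar_init_queues() name are assumptions for the example only.
 */
static int bar_init_queues(struct net_device *dev, unsigned int hw_max)
{
	unsigned int nq = min_t(unsigned int, hw_max,
				netif_get_num_default_rss_queues());
	int err;

	err = netif_set_real_num_tx_queues(dev, nq);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, nq);
}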
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);
void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queue
 * count to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
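
/*
 * Worked example for the helper above (illustrative numbers only):
 * with dev->num_tc set, the skb's priority mapped to TC 1, and TC 1
 * covering queues 4-7 (offset 4, count 4), a flow whose skb_get_hash()
 * is 0xdeadbeef is scaled as reciprocal_scale(0xdeadbeef, 4), i.e.
 * ((u64)0xdeadbeef * 4) >> 32 = 3, and therefore lands on queue
 * 4 + 3 = 7.
 */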
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
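
/*
 * Illustrative sketch (not part of this file): a hypothetical ndo_start_xmit
 * that cannot offload the checksum for a given packet and resolves it in
 * software with the helper above before handing the frame to hardware.
 * The baz_start_xmit() name is an assumption for the example only.
 */
static netdev_tx_t baz_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) {
		dev_kfree_skb_any(skb);	/* drop; nothing else we can do */
		return NETDEV_TX_OK;
	}
	/* ... queue the now fully-checksummed skb to hardware ... */
	return NETDEV_TX_OK;
}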
int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;

	return skb->ip_summed == CHECKSUM_NONE;
}
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
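
/*
 * Illustrative sketch (not part of this file): walking the segment list a
 * caller gets back from skb_gso_segment() and handing each piece onward,
 * as a TX-path user might. The qux_xmit_gso() name is an assumption for
 * the example only; error handling is reduced to the minimum.
 */
static int qux_xmit_gso(struct sk_buff *skb)
{
	struct sk_buff *segs = skb_gso_segment(skb, netif_skb_features(skb));

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return 0;	/* no segmentation needed; send skb as-is */

	consume_skb(skb);	/* original now covered by the segments */
	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;
		/* ... hand "segs" to the device's transmit routine ... */
		segs = next;
	}
	return 0;
}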
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(const struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}
int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		if (validate_xmit_xfrm(skb, features))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}

	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx =
			sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
	struct tcf_result cl_res;

	if (!cl)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tcf_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		unsigned int tci = skb->sender_cpu - 1;

		if (dev->num_tc) {
			tci *= dev->num_tc;
			tci += netdev_get_prio_tc_map(dev, skb->priority);
		}

		map = rcu_dereference(dev_maps->cpu_map[tci]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_at_ingress = 0;
# ifdef CONFIG_NET_EGRESS
	if (static_key_false(&egress_needed)) {
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
	}
# endif
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and shot the lock. It is not prone to deadlocks.
	 * Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {
			if (unlikely(__this_cpu_read(xmit_recursion) >
				     XMIT_RECURSION_LIMIT))
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);
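
/*
 * Illustrative sketch (not part of this file): a minimal packet injector
 * that builds a raw Ethernet frame and queues it with dev_queue_xmit().
 * The 64-byte dummy payload and the experimental EtherType 0x88b5 are
 * assumptions for the example only.
 */
static int send_test_frame(struct net_device *dev, const u8 *dst_mac)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + 64,
					GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	memset(skb_put(skb, 64), 0, 64);	/* dummy payload */
	if (dev_hard_header(skb, dev, 0x88b5, dst_mac,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	skb->dev = dev;
	return dev_queue_xmit(skb);	/* consumes the skb either way */
}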
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
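
/*
 * Illustrative sketch (not part of this file): the periodic scan a driver
 * with accelerated RFS might run over its installed filters, removing the
 * ones the stack no longer needs. The flow/filter bookkeeping arrays and
 * the commented-out mydrv_remove_filter() are assumptions for the example.
 */
static void mydrv_expire_filters(struct net_device *dev, u16 rxq_index,
				 const u32 *flow_ids, const u16 *filter_ids,
				 int nfilters)
{
	int i;

	for (i = 0; i < nfilters; i++) {
		if (rps_may_expire_flow(dev, rxq_index,
					flow_ids[i], filter_ids[i])) {
			/* mydrv_remove_filter(dev, filter_ids[i]); */
		}
	}
}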
#endif /* CONFIG_RFS_ACCEL */

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure belongs to another CPU.
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
				     struct bpf_prog *xdp_prog)
{
	struct xdp_buff xdp;
	u32 act = XDP_DROP;
	void *orig_data;
	int hlen, off;
	u32 mac_len;

	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
	if (skb_cloned(skb))
		return XDP_PASS;

	if (skb_linearize(skb))
		goto do_drop;

	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	hlen = skb_headlen(skb) + mac_len;
	xdp.data = skb->data - mac_len;
	xdp.data_end = xdp.data + hlen;
	xdp.data_hard_start = skb->data - skb_headroom(skb);
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	off = xdp.data - orig_data;
	if (off > 0)
		__skb_pull(skb, off);
	else if (off < 0)
		__skb_push(skb, -off);
	skb->mac_header += off;

	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
		__skb_push(skb, mac_len);
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
	do_drop:
		kfree_skb(skb);
		break;
	}

	return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		kfree_skb(skb);
	}
}
EXPORT_SYMBOL_GPL(generic_xdp_tx);
static struct static_key generic_xdp_needed __read_mostly;

int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
	if (xdp_prog) {
		u32 act = netif_receive_generic_xdp(skb, xdp_prog);
		int err;

		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect(skb->dev, skb,
							      xdp_prog);
				if (err)
					goto out_redir;
			/* fallthru to submit skb */
			case XDP_TX:
				generic_xdp_tx(skb, xdp_prog);
				break;
			}
			return XDP_DROP;
		}
	}
	return XDP_PASS;
out_redir:
	kfree_skb(skb);
	return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);

	if (static_key_false(&generic_xdp_needed)) {
		int ret;

		preempt_disable();
		rcu_read_lock();
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		rcu_read_unlock();
		preempt_enable();

		/* Consider XDP consuming the packet a success from
		 * the netdev point of view we do not want to count
		 * this as an error.
		 */
		if (ret != XDP_PASS)
			return NET_RX_SUCCESS;
	}

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);
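
/*
 * Illustrative sketch (not part of this file): a legacy (non-NAPI) driver's
 * receive path handing a freshly built skb to the stack via netif_rx().
 * The oldnic_rx_one() name and copy-from-buffer receive model are
 * assumptions for the example only.
 */
static void oldnic_rx_one(struct net_device *dev, const void *buf,
			  unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, buf, len);		/* copy out of the DMA ring */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue for the backlog NAPI */
}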
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
static __latent_entropy void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(refcount_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);

			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
				__kfree_skb(skb);
			else
				__kfree_skb_defer(skb);
		}

		__kfree_skb_flush();
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			spin_lock(root_lock);
			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
			smp_mb__before_atomic();
			clear_bit(__QDISC_STATE_SCHED, &q->state);
			qdisc_run(q);
			spin_unlock(root_lock);
		}
	}
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
static inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
	struct tcf_result cl_res;

	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
	if (!cl)
		return skb;
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_at_ingress = 1;
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tcf_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
		__skb_push(skb, skb->mac_len);
		skb_do_redirect(skb);
		return NULL;
	default:
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return skb;
}
/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
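
/*
 * Illustrative sketch (not part of this file): a virtual-switch style
 * module claiming a port device with the handler API above. The myvsw_*
 * names and the commented-out port bookkeeping are assumptions for the
 * example only.
 */
static rx_handler_result_t myvsw_handle_frame(struct sk_buff **pskb)
{
	/* void *port = rcu_dereference((*pskb)->dev->rx_handler_data); */
	/* ... steal the frame into the switch fabric here ... */
	return RX_HANDLER_CONSUMED;
}

static int myvsw_add_port(struct net_device *port_dev, void *port)
{
	ASSERT_RTNL();
	return netdev_rx_handler_register(port_dev, myvsw_handle_frame, port);
}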
/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_key_false(&ingress_needed)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
	skb_reset_tc(skb);
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		if (!deliver_exact)
			atomic_long_inc(&skb->dev->rx_dropped);
		else
			atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}
static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_key_slow_dec(&generic_xdp_needed);
		} else if (new && !old) {
			static_key_slow_inc(&generic_xdp_needed);
			dev_disable_lro(dev);
		}
		break;

	case XDP_QUERY_PROG:
		xdp->prog_attached = !!old;
		xdp->prog_id = old ? old->aux->id : 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	if (static_key_false(&generic_xdp_needed)) {
		int ret;

		preempt_disable();
		rcu_read_lock();
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		rcu_read_unlock();
		preempt_enable();

		if (ret != XDP_PASS)
			return NET_RX_DROP;
	}

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
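
/*
 * Illustrative sketch (not part of this file): the receive half of a NAPI
 * poll loop delivering completed buffers with netif_receive_skb(). The
 * commented-out mynic_ring_next() ring accessor is an assumption for the
 * example only.
 */
static int mynic_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = NULL; /* mynic_ring_next(napi) */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);
		done++;
	}
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}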
DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();
	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);
	local_irq_enable();

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	local_bh_enable();
}

static void flush_all_backlogs(void)
{
	unsigned int cpu;

	get_online_cpus();

	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}
/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}
static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
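
/* Illustrative sketch (not part of dev.c): how a typical driver hands
 * received buffers to GRO from its NAPI poll callback.  The ring
 * structure and the my_fetch_skb() descriptor helper are hypothetical.
 */
#if 0	/* example only, not built */
static int my_driver_rx(struct my_ring *ring, struct napi_struct *napi,
			int budget)
{
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = my_fetch_skb(ring);	/* hypothetical */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, ring->netdev);
		napi_gro_receive(napi, skb);	/* merge into a flow or deliver */
		work++;
	}
	return work;
}
#endif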
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	secpath_reset(skb);

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
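
/* Illustrative sketch (not part of dev.c): drivers that receive directly
 * into page fragments use napi_get_frags()/napi_gro_frags() instead of
 * building a linear skb.  The page/offset/len values would come from a
 * hypothetical RX descriptor.
 */
#if 0	/* example only, not built */
static void my_rx_frag(struct napi_struct *napi, struct page *page,
		       unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;				/* allocation failure: drop */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			PAGE_SIZE);		/* attach the received fragment */
	napi_gro_frags(napi);		/* parses the ethernet header itself */
}
#endif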
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending ipi, it's better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = dev_rx_weight;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
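
/* Illustrative sketch (not part of dev.c): a device hard irq handler
 * typically uses napi_schedule_prep()/__napi_schedule() (or the
 * napi_schedule() wrapper) to defer RX work to softirq context.  The
 * ring structure and my_disable_irq() helper are hypothetical.
 */
#if 0	/* example only, not built */
static irqreturn_t my_isr(int irq, void *data)
{
	struct my_ring *ring = data;

	if (napi_schedule_prep(&ring->napi)) {
		my_disable_irq(ring);		/* mask device interrupts */
		__napi_schedule(&ring->napi);	/* poll runs from NET_RX softirq */
	}
	return IRQ_HANDLED;
}
#endif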
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);
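
/* Illustrative sketch (not part of dev.c): the usual completion pattern.
 * A driver that consumed less than its budget calls napi_complete_done();
 * only if it returns true (no NAPIF_STATE_MISSED reschedule pending) may
 * the driver re-arm its interrupt.  my_clean_rx() and my_enable_irq()
 * are hypothetical helpers.
 */
#if 0	/* example only, not built */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int work = my_clean_rx(ring, budget);		/* hypothetical */

	if (work < budget && napi_complete_done(napi, work))
		my_enable_irq(ring);	/* safe: NAPI no longer scheduled */
	return work;
}
#endif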
/* must be called under rcu_read_lock(), as we dont take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET)
		__napi_schedule(napi);
	local_bh_enable();
}
void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */
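
/* Illustrative sketch (not part of dev.c): a caller of napi_busy_loop()
 * supplies a loop_end callback that bounds the spin.  The time budget
 * below is an assumption for illustration; it is expressed in the same
 * units as busy_loop_current_time() returns.
 */
#if 0	/* example only, not built */
static bool my_loop_end(void *arg, unsigned long start_time)
{
	unsigned long budget = (unsigned long)arg;

	/* stop spinning once the budgeted busy-poll time has elapsed */
	return busy_loop_current_time() - start_time > budget;
}

	/* spin on the given napi_id for at most the supplied budget */
	napi_busy_loop(napi_id, my_loop_end, (void *)100UL);
#endif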
static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}
/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_list && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
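
/* Illustrative sketch (not part of dev.c): wiring up NAPI at probe time.
 * A driver registers its poll callback with the default weight, then
 * enables the context once the device is ready.  my_poll is the
 * hypothetical handler sketched earlier.
 */
#if 0	/* example only, not built */
	netif_napi_add(netdev, &ring->napi, my_poll, NAPI_POLL_WEIGHT);
	/* ... allocate rings, request the irq, bring the device up ... */
	napi_enable(&ring->napi);	/* clears the initial SCHED bit */
#endif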
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}
void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev_rcu(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
		/* first is the upper device itself */
		ret = fn(udev, data);
		if (ret)
			return ret;

		/* then look at all of its upper devices */
		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
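
/* Illustrative sketch (not part of dev.c): the walker invokes a callback
 * for every device in the upper graph; a non-zero return stops the walk.
 * This hypothetical example counts uppers under the RCU read lock.
 */
#if 0	/* example only, not built */
static int my_count_upper(struct net_device *upper, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* keep walking */
}

	int uppers = 0;

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, my_count_upper, &uppers);
	rcu_read_unlock();
#endif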
/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold either the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *                         list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU
 *					variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}
static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}
static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}
static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (netdev_has_upper_dev(dev, upper_dev))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;
	changeupper_info.upper_info = upper_info;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
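
/* Illustrative sketch (not part of dev.c): how a bonding/team style
 * driver would enslave a port.  The slave private structure is
 * hypothetical; notifier listeners see NETDEV_PRECHANGEUPPER (which may
 * veto) and NETDEV_CHANGEUPPER around the link.
 */
#if 0	/* example only, not built */
	struct my_slave_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	int err;

	if (!priv)
		return -ENOMEM;
	err = netdev_master_upper_dev_link(slave_dev, master_dev,
					   priv, NULL);
	if (err)
		kfree(priv);	/* link refused by a notifier, or -EBUSY */
#endif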
/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;

	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
	changeupper_info.linking = false;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info;

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);
static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev)
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = 0;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower);
		if (max_nest < nest)
			max_nest = nest;
	}

	return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);
/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info;

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}
static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
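
/* Illustrative sketch (not part of dev.c): packet taps take a promiscuity
 * reference while capturing and drop it symmetrically; the per-device
 * count keeps the interface promiscuous until every user has released it.
 */
#if 0	/* example only, not built */
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* enter promiscuous mode */
	rtnl_unlock();
	/* ... capture traffic ... */
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* release our reference */
	rtnl_unlock();
#endif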
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all interfaces. Once it hits zero the device reverts back to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {
		if (old_flags & IFF_UP)
			__dev_close(dev);
		else
			ret = __dev_open(dev);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC, when
	 * IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = changes;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on device based state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
				    dev->name, new_mtu, dev->min_mtu);
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
				    dev->name, new_mtu, dev->max_mtu);
		return -EINVAL;
	}

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
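
/* Illustrative sketch (not part of dev.c): changing the MTU from a
 * kernel caller.  RTNL must be held; the value is validated against
 * dev->min_mtu/dev->max_mtu above, and notifiers may veto or revert it.
 */
#if 0	/* example only, not built */
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* jumbo frames, if the hw allows */
	rtnl_unlock();
	if (err)
		pr_warn("MTU change rejected: %d\n", err);
#endif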
/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_name)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_name(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);
u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id)
{
	struct netdev_xdp xdp;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG;

	/* Query must always succeed. */
	WARN_ON(xdp_op(dev, &xdp) < 0);
	if (prog_id)
		*prog_id = xdp.prog_id;

	return xdp.prog_attached;
}

static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
			   struct netlink_ext_ack *extack, u32 flags,
			   struct bpf_prog *prog)
{
	struct netdev_xdp xdp;

	memset(&xdp, 0, sizeof(xdp));
	if (flags & XDP_FLAGS_HW_MODE)
		xdp.command = XDP_SETUP_PROG_HW;
	else
		xdp.command = XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	return xdp_op(dev, &xdp);
}
/**
 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
 * @dev: device
 * @extack: netlink extended ack
 * @fd: new program fd or negative value to clear
 * @flags: xdp-related flags
 *
 * Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct bpf_prog *prog = NULL;
	xdp_op_t xdp_op, xdp_chk;
	int err;

	ASSERT_RTNL();

	xdp_op = xdp_chk = ops->ndo_xdp;
	if (!xdp_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
		return -EOPNOTSUPP;
	if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
		xdp_op = generic_xdp_install;
	if (xdp_op == xdp_chk)
		xdp_chk = generic_xdp_install;

	if (fd >= 0) {
		if (xdp_chk && __dev_xdp_attached(dev, xdp_chk, NULL))
			return -EEXIST;
		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
		    __dev_xdp_attached(dev, xdp_op, NULL))
			return -EBUSY;

		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	err = dev_xdp_install(dev, xdp_op, extack, flags, prog);
	if (err < 0 && prog)
		bpf_prog_put(prog);

	return err;
}
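
/* Illustrative sketch (not part of dev.c): the mode flags decide which
 * install hook is used.  XDP_FLAGS_SKB_MODE forces the generic
 * (skb-based) path, XDP_FLAGS_DRV_MODE requires a native ndo_xdp, and
 * XDP_FLAGS_HW_MODE offloads to hardware.  prog_fd comes from a bpf()
 * program load done by the hypothetical caller.
 */
#if 0	/* example only, not built */
	int err;

	rtnl_lock();
	err = dev_change_xdp_fd(dev, NULL, prog_fd, XDP_FLAGS_DRV_MODE);
	if (!err)
		pr_info("%s: native XDP program attached\n", dev->name);
	rtnl_unlock();
#endif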
/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number.  The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregisteration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
		}
	}
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        /* Fix illegal checksum combinations */
        if ((features & NETIF_F_HW_CSUM) &&
            (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
                netdev_warn(dev, "mixed HW and IP checksum settings.\n");
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        }

        /* TSO requires that SG is present as well. */
        if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
                netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
                features &= ~NETIF_F_ALL_TSO;
        }

        if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
                                        !(features & NETIF_F_IP_CSUM)) {
                netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
                features &= ~NETIF_F_TSO;
                features &= ~NETIF_F_TSO_ECN;
        }

        if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
                                         !(features & NETIF_F_IPV6_CSUM)) {
                netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
                features &= ~NETIF_F_TSO6;
        }

        /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
        if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
                features &= ~NETIF_F_TSO_MANGLEID;

        /* TSO ECN requires that TSO is present as well. */
        if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
                features &= ~NETIF_F_TSO_ECN;

        /* Software GSO depends on SG. */
        if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
                netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
                features &= ~NETIF_F_GSO;
        }

        /* GSO partial features require GSO partial be set */
        if ((features & dev->gso_partial_features) &&
            !(features & NETIF_F_GSO_PARTIAL)) {
                netdev_dbg(dev,
                           "Dropping partially supported GSO features since no GSO partial.\n");
                features &= ~dev->gso_partial_features;
        }

        return features;
}
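
/* Illustrative sketch (not part of the original source): how the fixups
 * above behave for a hypothetical driver that requests TSO without any
 * checksum offload. Only the NETIF_F_* flags come from this file; the rest
 * is made up for illustration.
 *
 *      netdev_features_t wanted = NETIF_F_SG | NETIF_F_TSO;
 *
 *      // No NETIF_F_HW_CSUM and no NETIF_F_IP_CSUM, so the TSO/CSUM
 *      // dependency check fires and TSO (plus TSO_ECN) is cleared:
 *      wanted = netdev_fix_features(dev, wanted);
 *      // wanted == NETIF_F_SG
 */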
int __netdev_update_features(struct net_device *dev)
{
        struct net_device *upper, *lower;
        netdev_features_t features;
        struct list_head *iter;
        int err = -1;

        ASSERT_RTNL();

        features = netdev_get_wanted_features(dev);

        if (dev->netdev_ops->ndo_fix_features)
                features = dev->netdev_ops->ndo_fix_features(dev, features);

        /* driver might be less strict about feature dependencies */
        features = netdev_fix_features(dev, features);

        /* some features can't be enabled if they're off on an upper device */
        netdev_for_each_upper_dev_rcu(dev, upper, iter)
                features = netdev_sync_upper_features(dev, upper, features);

        if (dev->features == features)
                goto sync_lower;

        netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
                &dev->features, &features);

        if (dev->netdev_ops->ndo_set_features)
                err = dev->netdev_ops->ndo_set_features(dev, features);
        else
                err = 0;

        if (unlikely(err < 0)) {
                netdev_err(dev,
                        "set_features() failed (%d); wanted %pNF, left %pNF\n",
                        err, &features, &dev->features);
                /* return non-0 since some features might have changed and
                 * it's better to fire a spurious notification than miss it
                 */
                return -1;
        }

sync_lower:
        /* some features must be disabled on lower devices when disabled
         * on an upper device (think: bonding master or bridge)
         */
        netdev_for_each_lower_dev(dev, lower, iter)
                netdev_sync_lower_features(dev, lower, features);

        if (!err) {
                netdev_features_t diff = features ^ dev->features;

                if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
                        /* udp_tunnel_{get,drop}_rx_info both need
                         * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
                         * device, or they won't do anything.
                         * Thus we need to update dev->features
                         * *before* calling udp_tunnel_get_rx_info,
                         * but *after* calling udp_tunnel_drop_rx_info.
                         */
                        if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
                                dev->features = features;
                                udp_tunnel_get_rx_info(dev);
                        } else {
                                udp_tunnel_drop_rx_info(dev);
                        }
                }

                dev->features = features;
        }

        return err < 0 ? 0 : 1;
}
/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
        if (__netdev_update_features(dev))
                netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
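
/* Usage sketch (assumption, not from this file): a driver that discovers at
 * runtime that an offload became unavailable would typically clear the bit
 * and re-run the feature state machine under RTNL; "mydrv_disable_tso" is a
 * hypothetical helper. netdev_change_features() below is the variant for
 * when stacked devices must be renotified even without a change.
 *
 *      static void mydrv_disable_tso(struct net_device *dev)
 *      {
 *              rtnl_lock();
 *              dev->hw_features &= ~NETIF_F_TSO;
 *              netdev_update_features(dev);    // notifies only on change
 *              rtnl_unlock();
 *      }
 */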
/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
        __netdev_update_features(dev);
        netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                      struct net_device *dev)
{
        if (rootdev->operstate == IF_OPER_DORMANT)
                netif_dormant_on(dev);
        else
                netif_dormant_off(dev);

        if (netif_carrier_ok(rootdev))
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
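
/* Example (sketch): stacked drivers such as 802.1q call this after attaching
 * to their lower device so the new upper device starts with a consistent
 * carrier/dormant state; "real_dev" and "vlan_dev" are illustrative names.
 *
 *      netif_stacked_transfer_operstate(real_dev, vlan_dev);
 */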
static int netif_alloc_rx_queues(struct net_device *dev)
{
        unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;
        size_t sz = count * sizeof(*rx);

        BUG_ON(count < 1);

        rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!rx)
                return -ENOMEM;

        dev->_rx = rx;

        for (i = 0; i < count; i++)
                rx[i].dev = dev;
        return 0;
}
static void netdev_init_one_queue(struct net_device *dev,
                                  struct netdev_queue *queue, void *_unused)
{
        /* Initialize queue lock */
        spin_lock_init(&queue->_xmit_lock);
        netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
#ifdef CONFIG_BQL
        dql_init(&queue->dql, HZ);
#endif
}
static void netif_free_tx_queues(struct net_device *dev)
{
        kvfree(dev->_tx);
}
static int netif_alloc_netdev_queues(struct net_device *dev)
{
        unsigned int count = dev->num_tx_queues;
        struct netdev_queue *tx;
        size_t sz = count * sizeof(*tx);

        if (count < 1 || count > 0xffff)
                return -EINVAL;

        tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!tx)
                return -ENOMEM;

        dev->_tx = tx;

        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
        spin_lock_init(&dev->tx_global_lock);

        return 0;
}
void netif_tx_stop_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                netif_tx_stop_queue(txq);
        }
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
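
/* Usage sketch (assumption): drivers commonly stop all TX queues from their
 * ndo_stop() callback before tearing down DMA rings, so the stack stops
 * handing them skbs. "mydrv_stop" is hypothetical.
 *
 *      static int mydrv_stop(struct net_device *dev)
 *      {
 *              netif_tx_stop_all_queues(dev);
 *              // ... quiesce hardware, free rings ...
 *              return 0;
 *      }
 */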
/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
        int ret;
        struct net *net = dev_net(dev);

        BUG_ON(dev_boot_phase);
        ASSERT_RTNL();

        might_sleep();

        /* When net_device's are persistent, this will be fatal. */
        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
        BUG_ON(!net);

        spin_lock_init(&dev->addr_list_lock);
        netdev_set_addr_lockdep_class(dev);

        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
                goto out;

        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
                if (ret) {
                        if (ret > 0)
                                ret = -EIO;
                        goto out;
                }
        }

        if (((dev->hw_features | dev->features) &
             NETIF_F_HW_VLAN_CTAG_FILTER) &&
            (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
             !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
                netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
                ret = -EINVAL;
                goto err_uninit;
        }

        ret = -EBUSY;
        if (!dev->ifindex)
                dev->ifindex = dev_new_index(net);
        else if (__dev_get_by_index(net, dev->ifindex))
                goto err_uninit;

        /* Transfer changeable features to wanted_features and enable
         * software offloads (GSO and GRO).
         */
        dev->hw_features |= NETIF_F_SOFT_FEATURES;
        dev->features |= NETIF_F_SOFT_FEATURES;

        if (dev->netdev_ops->ndo_udp_tunnel_add) {
                dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
                dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
        }

        dev->wanted_features = dev->features & dev->hw_features;

        if (!(dev->flags & IFF_LOOPBACK))
                dev->hw_features |= NETIF_F_NOCACHE_COPY;

        /* If IPv4 TCP segmentation offload is supported we should also
         * allow the device to enable segmenting the frame with the option
         * of ignoring a static IP ID value. This doesn't enable the
         * feature itself but allows the user to enable it later.
         */
        if (dev->hw_features & NETIF_F_TSO)
                dev->hw_features |= NETIF_F_TSO_MANGLEID;
        if (dev->vlan_features & NETIF_F_TSO)
                dev->vlan_features |= NETIF_F_TSO_MANGLEID;
        if (dev->mpls_features & NETIF_F_TSO)
                dev->mpls_features |= NETIF_F_TSO_MANGLEID;
        if (dev->hw_enc_features & NETIF_F_TSO)
                dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
         */
        dev->vlan_features |= NETIF_F_HIGHDMA;

        /* Make NETIF_F_SG inheritable to tunnel devices.
         */
        dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

        /* Make NETIF_F_SG inheritable to MPLS.
         */
        dev->mpls_features |= NETIF_F_SG;

        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                goto err_uninit;

        ret = netdev_register_kobject(dev);
        if (ret)
                goto err_uninit;
        dev->reg_state = NETREG_REGISTERED;

        __netdev_update_features(dev);

        /*
         *      Default initial state at registry is that the
         *      device is present.
         */

        set_bit(__LINK_STATE_PRESENT, &dev->state);

        linkwatch_init_dev(dev);

        dev_init_scheduler(dev);
        dev_hold(dev);
        list_netdevice(dev);
        add_device_randomness(dev->dev_addr, dev->addr_len);

        /* If the device has permanent device address, driver should
         * set dev_addr and also addr_assign_type should be set to
         * NET_ADDR_PERM (default value).
         */
        if (dev->addr_assign_type == NET_ADDR_PERM)
                memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        /* Notify protocols, that a new device appeared. */
        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
        ret = notifier_to_errno(ret);
        if (ret) {
                rollback_registered(dev);
                dev->reg_state = NETREG_UNREGISTERED;
        }
        /*
         *      Prevent userspace races by waiting until the network
         *      device is fully setup before sending notifications.
         */
        if (!dev->rtnl_link_ops ||
            dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
                rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
        return ret;

err_uninit:
        if (dev->netdev_ops->ndo_uninit)
                dev->netdev_ops->ndo_uninit(dev);
        if (dev->priv_destructor)
                dev->priv_destructor(dev);
        goto out;
}
EXPORT_SYMBOL(register_netdevice);
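
/* Sketch of the notifier side mentioned above (not from this file): a
 * subsystem that wants to learn about new devices registers a netdevice
 * notifier and watches for NETDEV_REGISTER/NETDEV_UNREGISTER. All "mymod_*"
 * names are hypothetical.
 *
 *      static int mymod_netdev_event(struct notifier_block *nb,
 *                                    unsigned long event, void *ptr)
 *      {
 *              struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *              switch (event) {
 *              case NETDEV_REGISTER:
 *                      pr_info("mymod: %s registered\n", dev->name);
 *                      break;
 *              case NETDEV_UNREGISTER:
 *                      pr_info("mymod: %s going away\n", dev->name);
 *                      break;
 *              }
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block mymod_nb = {
 *              .notifier_call = mymod_netdev_event,
 *      };
 *      // register_netdevice_notifier(&mymod_nb);
 */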
/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
        /* Clear everything. Note we don't initialize spinlocks
         * as they aren't supposed to be taken by any of the
         * NAPI code and this dummy netdev is supposed to be
         * only ever used for NAPI polls
         */
        memset(dev, 0, sizeof(struct net_device));

        /* make sure we BUG if trying to hit standard
         * register/unregister code path
         */
        dev->reg_state = NETREG_DUMMY;

        /* NAPI wants this */
        INIT_LIST_HEAD(&dev->napi_list);

        /* a dummy interface is started by default */
        set_bit(__LINK_STATE_PRESENT, &dev->state);
        set_bit(__LINK_STATE_START, &dev->state);

        /* Note : We dont allocate pcpu_refcnt for dummy devices,
         * because users of this 'device' dont need to change
         * its refcount.
         */

        return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
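
/* Sketch (assumption): a driver multiplexing several hardware channels onto
 * one NAPI context can host that NAPI on a dummy netdev. The "priv" and
 * "mydrv_poll" names are hypothetical.
 *
 *      init_dummy_netdev(&priv->dummy_dev);
 *      netif_napi_add(&priv->dummy_dev, &priv->napi, mydrv_poll, 64);
 *      napi_enable(&priv->napi);
 */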
/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = register_netdevice(dev);
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(register_netdev);
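
/* Minimal driver-side lifecycle sketch (not from this file); error handling
 * trimmed, and all "mydrv" identifiers hypothetical:
 *
 *      struct net_device *dev;
 *
 *      dev = alloc_etherdev(sizeof(struct mydrv_priv));
 *      if (!dev)
 *              return -ENOMEM;
 *      dev->netdev_ops = &mydrv_netdev_ops;
 *      if (register_netdev(dev)) {     // takes rtnl internally
 *              free_netdev(dev);
 *              return -EIO;
 *      }
 *      ...
 *      unregister_netdev(dev);
 *      free_netdev(dev);
 */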
int netdev_refcnt_read(const struct net_device *dev)
{
        int i, refcnt = 0;

        for_each_possible_cpu(i)
                refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
        return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
        unsigned long rebroadcast_time, warning_time;
        int refcnt;

        linkwatch_forget_dev(dev);

        rebroadcast_time = warning_time = jiffies;
        refcnt = netdev_refcnt_read(dev);

        while (refcnt != 0) {
                if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
                        rtnl_lock();

                        /* Rebroadcast unregister notification */
                        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

                        __rtnl_unlock();
                        rcu_barrier();
                        rtnl_lock();

                        call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
                        if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
                                     &dev->state)) {
                                /* We must not have linkwatch events
                                 * pending on unregister. If this
                                 * happens, we simply run the queue
                                 * unscheduled, resulting in a noop
                                 * for this device.
                                 */
                                linkwatch_run_queue();
                        }

                        __rtnl_unlock();

                        rebroadcast_time = jiffies;
                }

                msleep(250);

                refcnt = netdev_refcnt_read(dev);

                if (time_after(jiffies, warning_time + 10 * HZ)) {
                        pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
                                 dev->name, refcnt);
                        warning_time = jiffies;
                }
        }
}
/* The sequence is:
 *
 *      rtnl_lock();
 *      ...
 *      register_netdevice(x1);
 *      register_netdevice(x2);
 *      ...
 *      unregister_netdevice(y1);
 *      unregister_netdevice(y2);
 *      ...
 *      rtnl_unlock();
 *      free_netdev(y1);
 *      free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
        struct list_head list;

        /* Snapshot list, allow later requests */
        list_replace_init(&net_todo_list, &list);

        __rtnl_unlock();

        /* Wait for rcu callbacks to finish before next phase */
        if (!list_empty(&list))
                rcu_barrier();

        while (!list_empty(&list)) {
                struct net_device *dev
                        = list_first_entry(&list, struct net_device, todo_list);
                list_del(&dev->todo_list);

                rtnl_lock();
                call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
                __rtnl_unlock();

                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
                        pr_err("network todo '%s' but state %d\n",
                               dev->name, dev->reg_state);
                        dump_stack();
                        continue;
                }

                dev->reg_state = NETREG_UNREGISTERED;

                netdev_wait_allrefs(dev);

                /* paranoia */
                BUG_ON(netdev_refcnt_read(dev));
                BUG_ON(!list_empty(&dev->ptype_all));
                BUG_ON(!list_empty(&dev->ptype_specific));
                WARN_ON(rcu_access_pointer(dev->ip_ptr));
                WARN_ON(rcu_access_pointer(dev->ip6_ptr));
                WARN_ON(dev->dn_ptr);

                if (dev->priv_destructor)
                        dev->priv_destructor(dev);
                if (dev->needs_free_netdev)
                        free_netdev(dev);

                /* Report a network device has been unregistered */
                rtnl_lock();
                dev_net(dev)->dev_unreg_count--;
                __rtnl_unlock();
                wake_up(&netdev_unregistering_wq);

                /* Free network device */
                kobject_put(&dev->dev.kobj);
        }
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                             const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
        BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
        memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
        /* zero out counters that only exist in rtnl_link_stats64 */
        memset((char *)stats64 + sizeof(*netdev_stats), 0,
               sizeof(*stats64) - sizeof(*netdev_stats));
#else
        size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
        const unsigned long *src = (const unsigned long *)netdev_stats;
        u64 *dst = (u64 *)stats64;

        BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
        for (i = 0; i < n; i++)
                dst[i] = src[i];
        /* zero out counters that only exist in rtnl_link_stats64 */
        memset((char *)stats64 + n * sizeof(u64), 0,
               sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                                        struct rtnl_link_stats64 *storage)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        if (ops->ndo_get_stats64) {
                memset(storage, 0, sizeof(*storage));
                ops->ndo_get_stats64(dev, storage);
        } else if (ops->ndo_get_stats) {
                netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
        } else {
                netdev_stats_to_stats64(storage, &dev->stats);
        }
        storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
        storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
        storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
        return storage;
}
EXPORT_SYMBOL(dev_get_stats);
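
/* Sketch (assumption): a driver providing 64-bit stats fills @storage in its
 * ndo_get_stats64; dev_get_stats() zeroes it first, so only the counters the
 * driver maintains need to be written. "mydrv" names are hypothetical.
 *
 *      static void mydrv_get_stats64(struct net_device *dev,
 *                                    struct rtnl_link_stats64 *storage)
 *      {
 *              struct mydrv_priv *priv = netdev_priv(dev);
 *
 *              storage->rx_packets = priv->rx_packets;
 *              storage->tx_packets = priv->tx_packets;
 *      }
 */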
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
        struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
        if (queue)
                return queue;
        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return NULL;
        netdev_init_one_queue(dev, queue, NULL);
        RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
        queue->qdisc_sleeping = &noop_qdisc;
        rcu_assign_pointer(dev->ingress_queue, queue);
#endif
        return queue;
}
static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
                                    const struct ethtool_ops *ops)
{
        if (dev->ethtool_ops == &default_ethtool_ops)
                dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
void netdev_freemem(struct net_device *dev)
{
        char *addr = (char *)dev - dev->padded;

        kvfree(addr);
}
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                unsigned char name_assign_type,
                void (*setup)(struct net_device *),
                unsigned int txqs, unsigned int rxqs)
{
        struct net_device *dev;
        unsigned int alloc_size;
        struct net_device *p;

        BUG_ON(strlen(name) >= sizeof(dev->name));

        if (txqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
                return NULL;
        }

        if (rxqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
                return NULL;
        }

        alloc_size = sizeof(struct net_device);
        if (sizeof_priv) {
                /* ensure 32-byte alignment of private area */
                alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
                alloc_size += sizeof_priv;
        }
        /* ensure 32-byte alignment of whole construct */
        alloc_size += NETDEV_ALIGN - 1;

        p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!p)
                return NULL;

        dev = PTR_ALIGN(p, NETDEV_ALIGN);
        dev->padded = (char *)dev - (char *)p;

        dev->pcpu_refcnt = alloc_percpu(int);
        if (!dev->pcpu_refcnt)
                goto free_dev;

        if (dev_addr_init(dev))
                goto free_pcpu;

        dev_mc_init(dev);
        dev_uc_init(dev);

        dev_net_set(dev, &init_net);

        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;

        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
        INIT_LIST_HEAD(&dev->close_list);
        INIT_LIST_HEAD(&dev->link_watch_list);
        INIT_LIST_HEAD(&dev->adj_list.upper);
        INIT_LIST_HEAD(&dev->adj_list.lower);
        INIT_LIST_HEAD(&dev->ptype_all);
        INIT_LIST_HEAD(&dev->ptype_specific);
#ifdef CONFIG_NET_SCHED
        hash_init(dev->qdisc_hash);
#endif
        dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
        setup(dev);

        if (!dev->tx_queue_len) {
                dev->priv_flags |= IFF_NO_QUEUE;
                dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
        }

        dev->num_tx_queues = txqs;
        dev->real_num_tx_queues = txqs;
        if (netif_alloc_netdev_queues(dev))
                goto free_all;

        dev->num_rx_queues = rxqs;
        dev->real_num_rx_queues = rxqs;
        if (netif_alloc_rx_queues(dev))
                goto free_all;

        strcpy(dev->name, name);
        dev->name_assign_type = name_assign_type;
        dev->group = INIT_NETDEV_GROUP;
        if (!dev->ethtool_ops)
                dev->ethtool_ops = &default_ethtool_ops;

        nf_hook_ingress_init(dev);

        return dev;

free_all:
        free_netdev(dev);
        return NULL;

free_pcpu:
        free_percpu(dev->pcpu_refcnt);
free_dev:
        netdev_freemem(dev);
        return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
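
/* Sketch (assumption): most callers go through the alloc_netdev() or
 * alloc_etherdev_mq() wrappers; a direct multiqueue allocation looks like
 * this, with "mydrv_setup" a hypothetical setup callback:
 *
 *      dev = alloc_netdev_mqs(sizeof(struct mydrv_priv), "myeth%d",
 *                             NET_NAME_ENUM, mydrv_setup, 8, 8);
 *      if (!dev)
 *              return -ENOMEM;
 *      // "myeth%d" is expanded to a free name at register time
 */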
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{
        struct napi_struct *p, *n;
        struct bpf_prog *prog;

        might_sleep();
        netif_free_tx_queues(dev);
        kvfree(dev->_rx);

        kfree(rcu_dereference_protected(dev->ingress_queue, 1));

        /* Flush device addresses */
        dev_addr_flush(dev);

        list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
                netif_napi_del(p);

        free_percpu(dev->pcpu_refcnt);
        dev->pcpu_refcnt = NULL;

        prog = rcu_dereference_protected(dev->xdp_prog, 1);
        if (prog) {
                bpf_prog_put(prog);
                static_key_slow_dec(&generic_xdp_needed);
        }

        /* Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                netdev_freemem(dev);
                return;
        }

        BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
        dev->reg_state = NETREG_RELEASED;

        /* will free via device release */
        put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 * synchronize_net -  Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
        might_sleep();
        if (rtnl_is_locked())
                synchronize_rcu_expedited();
        else
                synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If head not NULL, device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
        ASSERT_RTNL();

        if (head) {
                list_move_tail(&dev->unreg_list, head);
        } else {
                rollback_registered(dev);
                /* Finish processing unregister after unlock */
                net_set_todo(dev);
        }
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 *
 * Note: As most callers use a stack allocated list_head,
 * we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
        struct net_device *dev;

        if (!list_empty(head)) {
                rollback_registered_many(head);
                list_for_each_entry(dev, head, unreg_list)
                        net_set_todo(dev);
                list_del(head);
        }
}
EXPORT_SYMBOL(unregister_netdevice_many);
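
/* Usage sketch (assumption): batching several unregisters under one RTNL
 * hold amortizes the expensive synchronize_net() and notifier work done in
 * rollback_registered_many():
 *
 *      LIST_HEAD(kill_list);
 *
 *      rtnl_lock();
 *      unregister_netdevice_queue(dev1, &kill_list);
 *      unregister_netdevice_queue(dev2, &kill_list);
 *      unregister_netdevice_many(&kill_list);  // also does list_del()
 *      rtnl_unlock();
 */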
/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
        rtnl_lock();
        unregister_netdevice(dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
/**
 * dev_change_net_namespace - move device to different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
        int err;

        ASSERT_RTNL();

        /* Don't allow namespace local devices to be moved. */
        err = -EINVAL;
        if (dev->features & NETIF_F_NETNS_LOCAL)
                goto out;

        /* Ensure the device has been registered */
        if (dev->reg_state != NETREG_REGISTERED)
                goto out;

        /* Get out if there is nothing to do */
        err = 0;
        if (net_eq(dev_net(dev), net))
                goto out;

        /* Pick the destination device name, and ensure
         * we can use it in the destination network namespace.
         */
        err = -EEXIST;
        if (__dev_get_by_name(net, dev->name)) {
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
                if (dev_get_valid_name(net, dev, pat) < 0)
                        goto out;
        }

        /*
         * And now a mini version of register_netdevice unregister_netdevice.
         */

        /* If device is running close it first. */
        dev_close(dev);

        /* And unlink it from device chain */
        unlist_netdevice(dev);

        synchronize_net();

        /* Shutdown queueing discipline. */
        dev_shutdown(dev);

        /* Notify protocols, that we are about to destroy
         * this device. They should clean all the things.
         *
         * Note that dev->reg_state stays at NETREG_REGISTERED.
         * This is wanted because this way 8021q and macvlan know
         * the device is just moving and can keep their slaves up.
         */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        rcu_barrier();
        call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
        rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

        /*
         *      Flush the unicast and multicast chains
         */
        dev_uc_flush(dev);
        dev_mc_flush(dev);

        /* Send a netdev-removed uevent to the old namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
        netdev_adjacent_del_links(dev);

        /* Actually switch the network namespace */
        dev_net_set(dev, net);

        /* If there is an ifindex conflict assign a new one */
        if (__dev_get_by_index(net, dev->ifindex))
                dev->ifindex = dev_new_index(net);

        /* Send a netdev-add uevent to the new namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
        netdev_adjacent_add_links(dev);

        /* Fixup kobjects */
        err = device_rename(&dev->dev, dev->name);
        WARN_ON(err);

        /* Add the device back in the hashes */
        list_netdevice(dev);

        /* Notify protocols, that a new device appeared. */
        call_netdevice_notifiers(NETDEV_REGISTER, dev);

        /*
         *      Prevent userspace races by waiting until the network
         *      device is fully setup before sending notifications.
         */
        rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

        synchronize_net();
        err = 0;
out:
        return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
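
/* Usage sketch (assumption): moving a device into another namespace under
 * RTNL, falling back to a "dev%d" style pattern if its name is taken there:
 *
 *      rtnl_lock();
 *      err = dev_change_net_namespace(dev, target_net, "dev%d");
 *      rtnl_unlock();
 */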
static int dev_cpu_dead(unsigned int oldcpu)
{
        struct sk_buff **list_skb;
        struct sk_buff *skb;
        unsigned int cpu;
        struct softnet_data *sd, *oldsd, *remsd = NULL;

        local_irq_disable();
        cpu = smp_processor_id();
        sd = &per_cpu(softnet_data, cpu);
        oldsd = &per_cpu(softnet_data, oldcpu);

        /* Find end of our completion_queue. */
        list_skb = &sd->completion_queue;
        while (*list_skb)
                list_skb = &(*list_skb)->next;
        /* Append completion queue from offline CPU. */
        *list_skb = oldsd->completion_queue;
        oldsd->completion_queue = NULL;

        /* Append output queue from offline CPU. */
        if (oldsd->output_queue) {
                *sd->output_queue_tailp = oldsd->output_queue;
                sd->output_queue_tailp = oldsd->output_queue_tailp;
                oldsd->output_queue = NULL;
                oldsd->output_queue_tailp = &oldsd->output_queue;
        }
        /* Append NAPI poll list from offline CPU, with one exception :
         * process_backlog() must be called by cpu owning percpu backlog.
         * We properly handle process_queue & input_pkt_queue later.
         */
        while (!list_empty(&oldsd->poll_list)) {
                struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
                                                            struct napi_struct,
                                                            poll_list);

                list_del_init(&napi->poll_list);
                if (napi->poll == process_backlog)
                        napi->state = 0;
                else
                        ____napi_schedule(sd, napi);
        }

        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();

#ifdef CONFIG_RPS
        remsd = oldsd->rps_ipi_list;
        oldsd->rps_ipi_list = NULL;
#endif
        /* send out pending IPI's on offline CPU */
        net_rps_send_ipi(remsd);

        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }
        while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }

        return 0;
}
/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
        netdev_features_t one, netdev_features_t mask)
{
        if (mask & NETIF_F_HW_CSUM)
                mask |= NETIF_F_CSUM_MASK;
        mask |= NETIF_F_VLAN_CHALLENGED;

        all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
        all &= one | ~NETIF_F_ALL_FOR_ALL;

        /* If one device supports hw checksumming, set for all. */
        if (all & NETIF_F_HW_CSUM)
                all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

        return all;
}
EXPORT_SYMBOL(netdev_increment_features);
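
/* Illustrative sketch (not from this file): a bonding-style master could
 * fold each slave's feature set into its own like this, starting from the
 * "all features" base and masking with what the master can pass through;
 * the list and field names here are hypothetical.
 *
 *      netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *
 *      list_for_each_entry(slave, &master_slave_list, list)
 *              features = netdev_increment_features(features,
 *                                                   slave->dev->features,
 *                                                   master->features);
 */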
static struct hlist_head * __net_init netdev_create_hash(void)
{
        int i;
        struct hlist_head *hash;

        hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
        if (hash != NULL)
                for (i = 0; i < NETDEV_HASHENTRIES; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
        if (net != &init_net)
                INIT_LIST_HEAD(&net->dev_base_head);

        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
                goto err_name;

        net->dev_index_head = netdev_create_hash();
        if (net->dev_index_head == NULL)
                goto err_idx;

        return 0;

err_idx:
        kfree(net->dev_name_head);
err_name:
        return -ENOMEM;
}
/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
        const struct device_driver *driver;
        const struct device *parent;
        const char *empty = "";

        parent = dev->dev.parent;
        if (!parent)
                return empty;

        driver = parent->driver;
        if (driver && driver->name)
                return driver->name;
        return empty;
}
static void __netdev_printk(const char *level, const struct net_device *dev,
                            struct va_format *vaf)
{
        if (dev && dev->dev.parent) {
                dev_printk_emit(level[1] - '0',
                                dev->dev.parent,
                                "%s %s %s%s: %pV",
                                dev_driver_string(dev->dev.parent),
                                dev_name(dev->dev.parent),
                                netdev_name(dev), netdev_reg_state(dev),
                                vaf);
        } else if (dev) {
                printk("%s%s%s: %pV",
                       level, netdev_name(dev), netdev_reg_state(dev), vaf);
        } else {
                printk("%s(NULL net_device): %pV", level, vaf);
        }
}
void netdev_printk(const char *level, const struct net_device *dev,
                   const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);

        vaf.fmt = format;
        vaf.va = &args;

        __netdev_printk(level, dev, &vaf);

        va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)                 \
void func(const struct net_device *dev, const char *fmt, ...)   \
{                                                               \
        struct va_format vaf;                                   \
        va_list args;                                           \
                                                                \
        va_start(args, fmt);                                    \
                                                                \
        vaf.fmt = fmt;                                          \
        vaf.va = &args;                                         \
                                                                \
        __netdev_printk(level, dev, &vaf);                      \
                                                                \
        va_end(args);                                           \
}                                                               \
EXPORT_SYMBOL(func);
define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
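
/* Usage sketch: the helpers generated above behave like dev_info() and
 * friends but prefix driver, bus and interface names, e.g.:
 *
 *      netdev_info(dev, "link up, %u Mbps\n", speed);
 *      netdev_warn(dev, "TX timeout on queue %d\n", q);
 */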
static void __net_exit netdev_exit(struct net *net)
{
        kfree(net->dev_name_head);
        kfree(net->dev_index_head);
}
static struct pernet_operations __net_initdata netdev_net_ops = {
        .init = netdev_init,
        .exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
        struct net_device *dev, *aux;
        /*
         * Push all migratable network devices back to the
         * initial network namespace
         */
        rtnl_lock();
        for_each_netdev_safe(net, dev, aux) {
                int err;
                char fb_name[IFNAMSIZ];

                /* Ignore unmoveable devices (i.e. loopback) */
                if (dev->features & NETIF_F_NETNS_LOCAL)
                        continue;

                /* Leave virtual devices for the generic cleanup */
                if (dev->rtnl_link_ops)
                        continue;

                /* Push remaining network devices to init_net */
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        pr_emerg("%s: failed to move %s to init_net: %d\n",
                                 __func__, dev->name, err);
                        BUG();
                }
        }
        rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
        /* Return with the rtnl_lock held when there are no network
         * devices unregistering in any network namespace in net_list.
         */
        struct net *net;
        bool unregistering;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(&netdev_unregistering_wq, &wait);
        for (;;) {
                unregistering = false;
                rtnl_lock();
                list_for_each_entry(net, net_list, exit_list) {
                        if (net->dev_unreg_count > 0) {
                                unregistering = true;
                                break;
                        }
                }
                if (!unregistering)
                        break;
                __rtnl_unlock();

                wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
        /* At exit all network devices must be removed from a network
         * namespace. Do this in the reverse order of registration.
         * Do this across as many network namespaces as possible to
         * improve batching efficiency.
         */
        struct net_device *dev;
        struct net *net;
        LIST_HEAD(dev_kill_list);

        /* To prevent network device cleanup code from dereferencing
         * loopback devices or network devices that have been freed
         * wait here for all pending unregistrations to complete,
         * before unregistering the loopback device and allowing the
         * network namespace to be freed.
         *
         * The netdev todo list containing all network devices
         * unregistrations that happen in default_device_exit_batch
         * will run in the rtnl_unlock() at the end of
         * default_device_exit_batch.
         */
        rtnl_lock_unregistering(net_list);
        list_for_each_entry(net, net_list, exit_list) {
                for_each_netdev_reverse(net, dev) {
                        if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
                                dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
                        else
                                unregister_netdevice_queue(dev, &dev_kill_list);
                }
        }
        unregister_netdevice_many(&dev_kill_list);
        rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
        .exit = default_device_exit,
        .exit_batch = default_device_exit_batch,
};
/*
 *      Initialize the DEV module. At boot time this walks the device list and
 *      unhooks any devices that fail to initialise (normally hardware not
 *      present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *      This is called single threaded during boot, so no need
 *      to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
        int i, rc = -ENOMEM;

        BUG_ON(!dev_boot_phase);

        if (dev_proc_init())
                goto out;

        if (netdev_kobject_init())
                goto out;

        INIT_LIST_HEAD(&ptype_all);
        for (i = 0; i < PTYPE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&ptype_base[i]);

        INIT_LIST_HEAD(&offload_base);

        if (register_pernet_subsys(&netdev_net_ops))
                goto out;

        /*
         *      Initialise the packet receive queues.
         */

        for_each_possible_cpu(i) {
                struct work_struct *flush = per_cpu_ptr(&flush_works, i);
                struct softnet_data *sd = &per_cpu(softnet_data, i);

                INIT_WORK(flush, flush_backlog);

                skb_queue_head_init(&sd->input_pkt_queue);
                skb_queue_head_init(&sd->process_queue);
                INIT_LIST_HEAD(&sd->poll_list);
                sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
                sd->csd.func = rps_trigger_softirq;
                sd->csd.info = sd;
                sd->cpu = i;
#endif

                sd->backlog.poll = process_backlog;
                sd->backlog.weight = weight_p;
        }

        dev_boot_phase = 0;

        /* The loopback device is special: if any other network device
         * is present in a network namespace, the loopback device must
         * be present too. Since we now dynamically allocate and free the
         * loopback device, ensure this invariant is maintained by
         * keeping the loopback device as the first device on the
         * list of network devices. Ensure the loopback device
         * is the first device that appears and the last network device
         * that disappears.
         */
        if (register_pernet_device(&loopback_net_ops))
                goto out;

        if (register_pernet_device(&default_device_ops))
                goto out;

        open_softirq(NET_TX_SOFTIRQ, net_tx_action);
        open_softirq(NET_RX_SOFTIRQ, net_rx_action);

        rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
                                       NULL, dev_cpu_dead);
        WARN_ON(rc < 0);
        rc = 0;
out:
        return rc;
}

subsys_initcall(net_dev_init);