/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets is
 *	first on the list, it cannot sense that the packet is cloned and
 *	should be copied-on-write, so it will change it and subsequent
 *	readers will get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next packet is received).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

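/* Editorial example (not part of the original file): a minimal sketch of
 * registering and removing a packet handler with the functions above.  The
 * handler name "example_tap_rcv" and the struct name are assumptions for
 * illustration only.
 */
#if 0	/* illustration only */
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* A tap receives its own reference to the skb; release it when done. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_tap __read_mostly = {
	.type = htons(ETH_P_ALL),	/* all protocols: joins ptype_all */
	.func = example_tap_rcv,
};

/* Usage:
 *	dev_add_pack(&example_tap);	- does not sleep
 *	...
 *	dev_remove_pack(&example_tap);	- sleeps (synchronize_net())
 */
#endif
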
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next packet is received).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

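/* Editorial sketch (not part of the original file): registering a protocol
 * offload.  Real users also fill in .callbacks with their GRO/GSO handlers;
 * those are omitted here because their exact signatures vary across kernel
 * versions.
 */
#if 0	/* illustration only */
static struct packet_offload example_offload __read_mostly = {
	.type = htons(ETH_P_IP),
	.priority = 10,	/* dev_add_offload() keeps the list sorted: lower
			 * priority values end up earlier in offload_base */
	/* .callbacks = { .gro_receive = ..., .gro_complete = ... }, */
};

/* Usage: dev_add_offload(&example_offload);
 *        ...
 *        dev_remove_offload(&example_offload);	- sleeps (synchronize_net())
 */
#endif
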
/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

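/* Editorial example: given the parsing above, a boot command line such as
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth1
 *
 * is split by get_options() into ints[] = { 4, 9, 0x300, 0xd0000, 0xd4000 },
 * so irq = 9, base_addr = 0x300, mem_start = 0xd0000, mem_end = 0xd4000,
 * and the remaining string "eth1" becomes the entry name passed to
 * netdev_boot_setup_add().  The "eth1" line is an assumption for
 * illustration only.
 */
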
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */
int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

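/* Editorial sketch (not part of the original file): the two lookup flavours
 * above side by side.  The name "eth0" and the helper function are made up
 * for illustration.
 */
#if 0	/* illustration only */
static void example_name_lookup(struct net *net)
{
	struct net_device *dev;

	/* Refcounted lookup: usable from any context; pairs with dev_put(). */
	dev = dev_get_by_name(net, "eth0");
	if (dev) {
		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
		dev_put(dev);
	}

	/* RCU lookup: cheaper, but the pointer is only valid inside the
	 * read-side critical section.
	 */
	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	if (dev)
		pr_info("%s is %s\n", dev->name,
			netif_running(dev) ? "running" : "down");
	rcu_read_unlock();
}
#endif
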
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

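/* Editorial sketch (not part of the original file): looking up a device by
 * MAC address under RCU.  The address bytes are made up for illustration.
 */
#if 0	/* illustration only */
static void example_hwaddr_lookup(struct net *net)
{
	static const char mac[ETH_ALEN] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
	if (dev)
		pr_info("MAC belongs to %s\n", dev->name);
	rcu_read_unlock();
}
#endif
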
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */
struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

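/* Editorial examples, following directly from the checks above:
 *
 *	dev_valid_name("eth0")   -> true
 *	dev_valid_name("")       -> false  (empty)
 *	dev_valid_name(".")      -> false  ("." and ".." are rejected)
 *	dev_valid_name("eth/0")  -> false  ('/' is rejected)
 *	dev_valid_name("my if")  -> false  (whitespace is rejected)
 *
 * Any name of IFNAMSIZ characters or more is also rejected.
 */
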
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return p ? -ENFILE : -EEXIST;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);

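/* Editorial sketch (not part of the original file): asking for the first
 * free "eth%d" slot on a freshly allocated, not yet registered net_device.
 */
#if 0	/* illustration only */
	int unit = dev_alloc_name(dev, "eth%d");

	if (unit < 0)
		return unit;	/* -EINVAL, -ENOMEM, -ENFILE or -EEXIST */
	/* dev->name now holds e.g. "eth0", and unit holds that number. */
#endif
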
int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	return dev_alloc_name_ns(net, dev, name);
}
EXPORT_SYMBOL(dev_get_valid_name);

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; format strings such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

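/* Editorial sketch (not part of the original file): renaming a device.
 * dev_change_name() must run under RTNL and fails with -EBUSY while the
 * device is up; "lan%d" is an arbitrary pattern for illustration.
 */
#if 0	/* illustration only */
	rtnl_lock();
	err = dev_change_name(dev, "lan%d");	/* picks the first free lan<N> */
	rtnl_unlock();
#endif
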
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	Get the ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

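/* Editorial sketch (not part of the original file): cycling a device up and
 * down.  Both dev_open() and dev_close() expect the caller to hold RTNL;
 * the helper name is an assumption for illustration.
 */
#if 0	/* illustration only */
static int example_cycle(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();

	err = dev_open(dev);	/* nop if the device is already up */
	if (err)
		return err;

	dev_close(dev);		/* nop if the device is already down */
	return 0;
}
#endif
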
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can even be on a different CPU. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it to have a race-free
 *	view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

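/* Editorial sketch (not part of the original file): a minimal notifier
 * using the registration functions above.  The handler name and the log
 * messages are assumptions for illustration.
 */
#if 0	/* illustration only */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is about to go down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* Usage: register_netdevice_notifier(&example_nb);
 *        ...
 *        unregister_netdevice_notifier(&example_nb);
 */
#endif
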
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return call_netdevice_notifiers_info(val, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

1cf51900 1701#ifdef CONFIG_NET_INGRESS
4577139b
DB
1702static struct static_key ingress_needed __read_mostly;
1703
1704void net_inc_ingress_queue(void)
1705{
1706 static_key_slow_inc(&ingress_needed);
1707}
1708EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1709
1710void net_dec_ingress_queue(void)
1711{
1712 static_key_slow_dec(&ingress_needed);
1713}
1714EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1715#endif
1716
1f211a1b
DB
1717#ifdef CONFIG_NET_EGRESS
1718static struct static_key egress_needed __read_mostly;
1719
1720void net_inc_egress_queue(void)
1721{
1722 static_key_slow_inc(&egress_needed);
1723}
1724EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1725
1726void net_dec_egress_queue(void)
1727{
1728 static_key_slow_dec(&egress_needed);
1729}
1730EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1731#endif
1732
c5905afb 1733static struct static_key netstamp_needed __read_mostly;
b90e5794 1734#ifdef HAVE_JUMP_LABEL
b90e5794 1735static atomic_t netstamp_needed_deferred;
13baa00a 1736static atomic_t netstamp_wanted;
5fa8bbda 1737static void netstamp_clear(struct work_struct *work)
1da177e4 1738{
b90e5794 1739 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
13baa00a 1740 int wanted;
b90e5794 1741
13baa00a
ED
1742 wanted = atomic_add_return(deferred, &netstamp_wanted);
1743 if (wanted > 0)
1744 static_key_enable(&netstamp_needed);
1745 else
1746 static_key_disable(&netstamp_needed);
5fa8bbda
ED
1747}
1748static DECLARE_WORK(netstamp_work, netstamp_clear);
b90e5794 1749#endif
5fa8bbda
ED
1750
1751void net_enable_timestamp(void)
1752{
13baa00a
ED
1753#ifdef HAVE_JUMP_LABEL
1754 int wanted;
1755
1756 while (1) {
1757 wanted = atomic_read(&netstamp_wanted);
1758 if (wanted <= 0)
1759 break;
1760 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1761 return;
1762 }
1763 atomic_inc(&netstamp_needed_deferred);
1764 schedule_work(&netstamp_work);
1765#else
c5905afb 1766 static_key_slow_inc(&netstamp_needed);
13baa00a 1767#endif
1da177e4 1768}
d1b19dff 1769EXPORT_SYMBOL(net_enable_timestamp);
1da177e4
LT
1770
1771void net_disable_timestamp(void)
1772{
b90e5794 1773#ifdef HAVE_JUMP_LABEL
13baa00a
ED
1774 int wanted;
1775
1776 while (1) {
1777 wanted = atomic_read(&netstamp_wanted);
1778 if (wanted <= 1)
1779 break;
1780 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1781 return;
1782 }
1783 atomic_dec(&netstamp_needed_deferred);
5fa8bbda
ED
1784 schedule_work(&netstamp_work);
1785#else
c5905afb 1786 static_key_slow_dec(&netstamp_needed);
5fa8bbda 1787#endif
1da177e4 1788}
d1b19dff 1789EXPORT_SYMBOL(net_disable_timestamp);
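/*
 * Editorial note (sketch, assuming a hypothetical example_sock type):
 * net_enable_timestamp()/net_disable_timestamp() behave like a global
 * reference count around the netstamp_needed key, so every enable must
 * eventually be balanced by exactly one disable, e.g. across the
 * lifetime of an object that needs packet timestamps:
 */
#if 0 /* example only */
static int example_sock_init(struct example_sock *esk)
{
	net_enable_timestamp();		/* skbs start being timestamped */
	return 0;
}

static void example_sock_destroy(struct example_sock *esk)
{
	net_disable_timestamp();	/* drop our reference */
}
#endif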
1da177e4 1790
3b098e2d 1791static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 1792{
2456e855 1793 skb->tstamp = 0;
c5905afb 1794 if (static_key_false(&netstamp_needed))
a61bbcf2 1795 __net_timestamp(skb);
1da177e4
LT
1796}
1797
588f0330 1798#define net_timestamp_check(COND, SKB) \
c5905afb 1799 if (static_key_false(&netstamp_needed)) { \
2456e855 1800 if ((COND) && !(SKB)->tstamp) \
588f0330
ED
1801 __net_timestamp(SKB); \
1802 } \
3b098e2d 1803
f4b05d27 1804bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
79b569f0
DL
1805{
1806 unsigned int len;
1807
1808 if (!(dev->flags & IFF_UP))
1809 return false;
1810
1811 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1812 if (skb->len <= len)
1813 return true;
1814
1815 /* if TSO is enabled, we don't care about the length as the packet
1816 * could be forwarded without being segmented first
1817 */
1818 if (skb_is_gso(skb))
1819 return true;
1820
1821 return false;
1822}
1ee481fb 1823EXPORT_SYMBOL_GPL(is_skb_forwardable);
79b569f0 1824
a0265d28
HX
1825int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1826{
4e3264d2 1827 int ret = ____dev_forward_skb(dev, skb);
a0265d28 1828
4e3264d2
MKL
1829 if (likely(!ret)) {
1830 skb->protocol = eth_type_trans(skb, dev);
1831 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1832 }
a0265d28 1833
4e3264d2 1834 return ret;
a0265d28
HX
1835}
1836EXPORT_SYMBOL_GPL(__dev_forward_skb);
1837
44540960
AB
1838/**
1839 * dev_forward_skb - loopback an skb to another netif
1840 *
1841 * @dev: destination network device
1842 * @skb: buffer to forward
1843 *
1844 * return values:
1845 * NET_RX_SUCCESS (no congestion)
6ec82562 1846 * NET_RX_DROP (packet was dropped, but freed)
44540960
AB
1847 *
1848 * dev_forward_skb can be used for injecting an skb from the
1849 * start_xmit function of one device into the receive queue
1850 * of another device.
1851 *
1852 * The receiving device may be in another namespace, so
1853 * we have to clear all information in the skb that could
1854 * impact namespace isolation.
1855 */
1856int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1857{
a0265d28 1858 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
44540960
AB
1859}
1860EXPORT_SYMBOL_GPL(dev_forward_skb);
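/*
 * Editorial example (sketch, modeled loosely on veth-style pair
 * devices; example_get_peer() is a hypothetical helper): using
 * dev_forward_skb() from an ndo_start_xmit implementation to inject
 * the frame into the peer's receive path. The skb is consumed by
 * dev_forward_skb() on both success and drop, so its length must be
 * sampled beforehand.
 */
#if 0 /* example only */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);
	unsigned int len = skb->len;

	if (unlikely(!peer)) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	} else {
		dev->stats.tx_dropped++;
	}
	return NETDEV_TX_OK;
}
#endif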
1861
71d9dec2
CG
1862static inline int deliver_skb(struct sk_buff *skb,
1863 struct packet_type *pt_prev,
1864 struct net_device *orig_dev)
1865{
1f8b977a 1866 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1080e512 1867 return -ENOMEM;
63354797 1868 refcount_inc(&skb->users);
71d9dec2
CG
1869 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1870}
1871
7866a621
SN
1872static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1873 struct packet_type **pt,
fbcb2170
JP
1874 struct net_device *orig_dev,
1875 __be16 type,
7866a621
SN
1876 struct list_head *ptype_list)
1877{
1878 struct packet_type *ptype, *pt_prev = *pt;
1879
1880 list_for_each_entry_rcu(ptype, ptype_list, list) {
1881 if (ptype->type != type)
1882 continue;
1883 if (pt_prev)
fbcb2170 1884 deliver_skb(skb, pt_prev, orig_dev);
7866a621
SN
1885 pt_prev = ptype;
1886 }
1887 *pt = pt_prev;
1888}
1889
c0de08d0
EL
1890static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1891{
a3d744e9 1892 if (!ptype->af_packet_priv || !skb->sk)
c0de08d0
EL
1893 return false;
1894
1895 if (ptype->id_match)
1896 return ptype->id_match(ptype, skb->sk);
1897 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1898 return true;
1899
1900 return false;
1901}
1902
1da177e4
LT
1903/*
1904 * Support routine. Sends outgoing frames to any network
1905 * taps currently in use.
1906 */
1907
74b20582 1908void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
1909{
1910 struct packet_type *ptype;
71d9dec2
CG
1911 struct sk_buff *skb2 = NULL;
1912 struct packet_type *pt_prev = NULL;
7866a621 1913 struct list_head *ptype_list = &ptype_all;
a61bbcf2 1914
1da177e4 1915 rcu_read_lock();
7866a621
SN
1916again:
1917 list_for_each_entry_rcu(ptype, ptype_list, list) {
1da177e4
LT
1918 /* Never send packets back to the socket
1919 * they originated from - MvS (miquels@drinkel.ow.org)
1920 */
7866a621
SN
1921 if (skb_loop_sk(ptype, skb))
1922 continue;
71d9dec2 1923
7866a621
SN
1924 if (pt_prev) {
1925 deliver_skb(skb2, pt_prev, skb->dev);
1926 pt_prev = ptype;
1927 continue;
1928 }
1da177e4 1929
7866a621
SN
1930 /* need to clone skb, done only once */
1931 skb2 = skb_clone(skb, GFP_ATOMIC);
1932 if (!skb2)
1933 goto out_unlock;
70978182 1934
7866a621 1935 net_timestamp_set(skb2);
1da177e4 1936
7866a621
SN
1937 /* skb->nh should be correctly
1938 * set by the sender, so that the check below is
1939 * just protection against buggy protocols.
1940 */
1941 skb_reset_mac_header(skb2);
1942
1943 if (skb_network_header(skb2) < skb2->data ||
1944 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1945 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1946 ntohs(skb2->protocol),
1947 dev->name);
1948 skb_reset_network_header(skb2);
1da177e4 1949 }
7866a621
SN
1950
1951 skb2->transport_header = skb2->network_header;
1952 skb2->pkt_type = PACKET_OUTGOING;
1953 pt_prev = ptype;
1954 }
1955
1956 if (ptype_list == &ptype_all) {
1957 ptype_list = &dev->ptype_all;
1958 goto again;
1da177e4 1959 }
7866a621 1960out_unlock:
581fe0ea
WB
1961 if (pt_prev) {
1962 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
1963 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1964 else
1965 kfree_skb(skb2);
1966 }
1da177e4
LT
1967 rcu_read_unlock();
1968}
74b20582 1969EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
1da177e4 1970
2c53040f
BH
1971/**
1972 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
4f57c087
JF
1973 * @dev: Network device
1974 * @txq: number of queues available
1975 *
1976 * If real_num_tx_queues is changed, the tc mappings may no longer be
1977 * valid. To resolve this, verify that each tc mapping remains valid,
1978 * and if not, reset that mapping to TC0. With no priorities mapping to
1979 * an offset/count pair, it will no longer be used. In the worst case,
1980 * if TC0 itself is invalid, nothing can be done, so priority mappings
1981 * are disabled entirely. It is expected that drivers will fix this
1982 * mapping if they can before calling netif_set_real_num_tx_queues.
1983 */
bb134d22 1984static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
1985{
1986 int i;
1987 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1988
1989 /* If TC0 is invalidated disable TC mapping */
1990 if (tc->offset + tc->count > txq) {
7b6cd1ce 1991 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
1992 dev->num_tc = 0;
1993 return;
1994 }
1995
1996 /* Invalidated prio to tc mappings set to TC0 */
1997 for (i = 1; i < TC_BITMASK + 1; i++) {
1998 int q = netdev_get_prio_tc_map(dev, i);
1999
2000 tc = &dev->tc_to_txq[q];
2001 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
2002 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2003 i, q);
4f57c087
JF
2004 netdev_set_prio_tc_map(dev, i, 0);
2005 }
2006 }
2007}
2008
8d059b0f
AD
2009int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2010{
2011 if (dev->num_tc) {
2012 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2013 int i;
2014
2015 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2016 if ((txq - tc->offset) < tc->count)
2017 return i;
2018 }
2019
2020 return -1;
2021 }
2022
2023 return 0;
2024}
8a5f2166 2025EXPORT_SYMBOL(netdev_txq_to_tc);
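/*
 * Editorial note (worked example): with two traffic classes laid out as
 * TC0 = {offset 0, count 4} and TC1 = {offset 4, count 4},
 * netdev_txq_to_tc(dev, 5) returns 1, because (5 - 0) < 4 fails for
 * the first entry and (5 - 4) < 4 holds for the second.
 */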
8d059b0f 2026
537c00de
AD
2027#ifdef CONFIG_XPS
2028static DEFINE_MUTEX(xps_map_mutex);
2029#define xmap_dereference(P) \
2030 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2031
6234f874
AD
2032static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2033 int tci, u16 index)
537c00de 2034{
10cdc3f3
AD
2035 struct xps_map *map = NULL;
2036 int pos;
537c00de 2037
10cdc3f3 2038 if (dev_maps)
6234f874
AD
2039 map = xmap_dereference(dev_maps->cpu_map[tci]);
2040 if (!map)
2041 return false;
537c00de 2042
6234f874
AD
2043 for (pos = map->len; pos--;) {
2044 if (map->queues[pos] != index)
2045 continue;
2046
2047 if (map->len > 1) {
2048 map->queues[pos] = map->queues[--map->len];
10cdc3f3 2049 break;
537c00de 2050 }
6234f874
AD
2051
2052 RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
2053 kfree_rcu(map, rcu);
2054 return false;
537c00de
AD
2055 }
2056
6234f874 2057 return true;
10cdc3f3
AD
2058}
2059
6234f874
AD
2060static bool remove_xps_queue_cpu(struct net_device *dev,
2061 struct xps_dev_maps *dev_maps,
2062 int cpu, u16 offset, u16 count)
2063{
184c449f
AD
2064 int num_tc = dev->num_tc ? : 1;
2065 bool active = false;
2066 int tci;
6234f874 2067
184c449f
AD
2068 for (tci = cpu * num_tc; num_tc--; tci++) {
2069 int i, j;
2070
2071 for (i = count, j = offset; i--; j++) {
2072 if (!remove_xps_queue(dev_maps, tci, j))
2073 break;
2074 }
2075
2076 active |= i < 0;
6234f874
AD
2077 }
2078
184c449f 2079 return active;
6234f874
AD
2080}
2081
2082static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2083 u16 count)
10cdc3f3
AD
2084{
2085 struct xps_dev_maps *dev_maps;
024e9679 2086 int cpu, i;
10cdc3f3
AD
2087 bool active = false;
2088
2089 mutex_lock(&xps_map_mutex);
2090 dev_maps = xmap_dereference(dev->xps_maps);
2091
2092 if (!dev_maps)
2093 goto out_no_maps;
2094
6234f874
AD
2095 for_each_possible_cpu(cpu)
2096 active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
2097 offset, count);
10cdc3f3
AD
2098
2099 if (!active) {
537c00de
AD
2100 RCU_INIT_POINTER(dev->xps_maps, NULL);
2101 kfree_rcu(dev_maps, rcu);
2102 }
2103
6234f874 2104 for (i = offset + (count - 1); count--; i--)
024e9679
AD
2105 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2106 NUMA_NO_NODE);
2107
537c00de
AD
2108out_no_maps:
2109 mutex_unlock(&xps_map_mutex);
2110}
2111
6234f874
AD
2112static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2113{
2114 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2115}
2116
01c5f864
AD
2117static struct xps_map *expand_xps_map(struct xps_map *map,
2118 int cpu, u16 index)
2119{
2120 struct xps_map *new_map;
2121 int alloc_len = XPS_MIN_MAP_ALLOC;
2122 int i, pos;
2123
2124 for (pos = 0; map && pos < map->len; pos++) {
2125 if (map->queues[pos] != index)
2126 continue;
2127 return map;
2128 }
2129
2130 /* Need to add queue to this CPU's existing map */
2131 if (map) {
2132 if (pos < map->alloc_len)
2133 return map;
2134
2135 alloc_len = map->alloc_len * 2;
2136 }
2137
2138 /* Need to allocate a new map to store the queue on this CPU */
2139 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2140 cpu_to_node(cpu));
2141 if (!new_map)
2142 return NULL;
2143
2144 for (i = 0; i < pos; i++)
2145 new_map->queues[i] = map->queues[i];
2146 new_map->alloc_len = alloc_len;
2147 new_map->len = pos;
2148
2149 return new_map;
2150}
2151
3573540c
MT
2152int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2153 u16 index)
537c00de 2154{
01c5f864 2155 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
184c449f
AD
2156 int i, cpu, tci, numa_node_id = -2;
2157 int maps_sz, num_tc = 1, tc = 0;
537c00de 2158 struct xps_map *map, *new_map;
01c5f864 2159 bool active = false;
537c00de 2160
184c449f
AD
2161 if (dev->num_tc) {
2162 num_tc = dev->num_tc;
2163 tc = netdev_txq_to_tc(dev, index);
2164 if (tc < 0)
2165 return -EINVAL;
2166 }
2167
2168 maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
2169 if (maps_sz < L1_CACHE_BYTES)
2170 maps_sz = L1_CACHE_BYTES;
2171
537c00de
AD
2172 mutex_lock(&xps_map_mutex);
2173
2174 dev_maps = xmap_dereference(dev->xps_maps);
2175
01c5f864 2176 /* allocate memory for queue storage */
184c449f 2177 for_each_cpu_and(cpu, cpu_online_mask, mask) {
01c5f864
AD
2178 if (!new_dev_maps)
2179 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2bb60cb9
AD
2180 if (!new_dev_maps) {
2181 mutex_unlock(&xps_map_mutex);
01c5f864 2182 return -ENOMEM;
2bb60cb9 2183 }
01c5f864 2184
184c449f
AD
2185 tci = cpu * num_tc + tc;
2186 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
01c5f864
AD
2187 NULL;
2188
2189 map = expand_xps_map(map, cpu, index);
2190 if (!map)
2191 goto error;
2192
184c449f 2193 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
01c5f864
AD
2194 }
2195
2196 if (!new_dev_maps)
2197 goto out_no_new_maps;
2198
537c00de 2199 for_each_possible_cpu(cpu) {
184c449f
AD
2200 /* copy maps belonging to foreign traffic classes */
2201 for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
2202 /* fill in the new device map from the old device map */
2203 map = xmap_dereference(dev_maps->cpu_map[tci]);
2204 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2205 }
2206
2207 /* We need to explicitly update tci as the previous loop
2208 * could break out early if dev_maps is NULL.
2209 */
2210 tci = cpu * num_tc + tc;
2211
01c5f864
AD
2212 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2213 /* add queue to CPU maps */
2214 int pos = 0;
2215
184c449f 2216 map = xmap_dereference(new_dev_maps->cpu_map[tci]);
01c5f864
AD
2217 while ((pos < map->len) && (map->queues[pos] != index))
2218 pos++;
2219
2220 if (pos == map->len)
2221 map->queues[map->len++] = index;
537c00de 2222#ifdef CONFIG_NUMA
537c00de
AD
2223 if (numa_node_id == -2)
2224 numa_node_id = cpu_to_node(cpu);
2225 else if (numa_node_id != cpu_to_node(cpu))
2226 numa_node_id = -1;
537c00de 2227#endif
01c5f864
AD
2228 } else if (dev_maps) {
2229 /* fill in the new device map from the old device map */
184c449f
AD
2230 map = xmap_dereference(dev_maps->cpu_map[tci]);
2231 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
537c00de 2232 }
01c5f864 2233
184c449f
AD
2234 /* copy maps belonging to foreign traffic classes */
2235 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2236 /* fill in the new device map from the old device map */
2237 map = xmap_dereference(dev_maps->cpu_map[tci]);
2238 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2239 }
537c00de
AD
2240 }
2241
01c5f864
AD
2242 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2243
537c00de 2244 /* Cleanup old maps */
184c449f
AD
2245 if (!dev_maps)
2246 goto out_no_old_maps;
2247
2248 for_each_possible_cpu(cpu) {
2249 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2250 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2251 map = xmap_dereference(dev_maps->cpu_map[tci]);
01c5f864
AD
2252 if (map && map != new_map)
2253 kfree_rcu(map, rcu);
2254 }
537c00de
AD
2255 }
2256
184c449f
AD
2257 kfree_rcu(dev_maps, rcu);
2258
2259out_no_old_maps:
01c5f864
AD
2260 dev_maps = new_dev_maps;
2261 active = true;
537c00de 2262
01c5f864
AD
2263out_no_new_maps:
2264 /* update Tx queue numa node */
537c00de
AD
2265 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2266 (numa_node_id >= 0) ? numa_node_id :
2267 NUMA_NO_NODE);
2268
01c5f864
AD
2269 if (!dev_maps)
2270 goto out_no_maps;
2271
2272 /* removes queue from unused CPUs */
2273 for_each_possible_cpu(cpu) {
184c449f
AD
2274 for (i = tc, tci = cpu * num_tc; i--; tci++)
2275 active |= remove_xps_queue(dev_maps, tci, index);
2276 if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
2277 active |= remove_xps_queue(dev_maps, tci, index);
2278 for (i = num_tc - tc, tci++; --i; tci++)
2279 active |= remove_xps_queue(dev_maps, tci, index);
01c5f864
AD
2280 }
2281
2282 /* free map if not active */
2283 if (!active) {
2284 RCU_INIT_POINTER(dev->xps_maps, NULL);
2285 kfree_rcu(dev_maps, rcu);
2286 }
2287
2288out_no_maps:
537c00de
AD
2289 mutex_unlock(&xps_map_mutex);
2290
2291 return 0;
2292error:
01c5f864
AD
2293 /* remove any maps that we added */
2294 for_each_possible_cpu(cpu) {
184c449f
AD
2295 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2296 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2297 map = dev_maps ?
2298 xmap_dereference(dev_maps->cpu_map[tci]) :
2299 NULL;
2300 if (new_map && new_map != map)
2301 kfree(new_map);
2302 }
01c5f864
AD
2303 }
2304
537c00de
AD
2305 mutex_unlock(&xps_map_mutex);
2306
537c00de
AD
2307 kfree(new_dev_maps);
2308 return -ENOMEM;
2309}
2310EXPORT_SYMBOL(netif_set_xps_queue);
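/*
 * Editorial example (sketch): a hypothetical multiqueue driver pinning
 * each TX queue to one online CPU via netif_set_xps_queue(). Errors are
 * ignored here for brevity; a real driver would check them.
 */
#if 0 /* example only */
static void example_setup_xps(struct net_device *dev)
{
	cpumask_var_t mask;
	unsigned int i;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		cpumask_clear(mask);
		cpumask_set_cpu(i % num_online_cpus(), mask);
		netif_set_xps_queue(dev, mask, i);
	}
	free_cpumask_var(mask);
}
#endif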
2311
2312#endif
9cf1f6a8
AD
2313void netdev_reset_tc(struct net_device *dev)
2314{
6234f874
AD
2315#ifdef CONFIG_XPS
2316 netif_reset_xps_queues_gt(dev, 0);
2317#endif
9cf1f6a8
AD
2318 dev->num_tc = 0;
2319 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2320 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2321}
2322EXPORT_SYMBOL(netdev_reset_tc);
2323
2324int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2325{
2326 if (tc >= dev->num_tc)
2327 return -EINVAL;
2328
6234f874
AD
2329#ifdef CONFIG_XPS
2330 netif_reset_xps_queues(dev, offset, count);
2331#endif
9cf1f6a8
AD
2332 dev->tc_to_txq[tc].count = count;
2333 dev->tc_to_txq[tc].offset = offset;
2334 return 0;
2335}
2336EXPORT_SYMBOL(netdev_set_tc_queue);
2337
2338int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2339{
2340 if (num_tc > TC_MAX_QUEUE)
2341 return -EINVAL;
2342
6234f874
AD
2343#ifdef CONFIG_XPS
2344 netif_reset_xps_queues_gt(dev, 0);
2345#endif
9cf1f6a8
AD
2346 dev->num_tc = num_tc;
2347 return 0;
2348}
2349EXPORT_SYMBOL(netdev_set_num_tc);
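/*
 * Editorial example (sketch, assuming the device exposes at least 8 TX
 * queues): carving the queues into two traffic classes and mapping the
 * 16 skb priorities onto them with netdev_set_prio_tc_map().
 */
#if 0 /* example only */
static int example_setup_tc(struct net_device *dev)
{
	int prio, err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0-3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4-7 */

	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 8 ? 0 : 1);
	return 0;
}
#endif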
2350
f0796d5c
JF
2351/*
2352 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2353 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2354 */
e6484930 2355int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2356{
1d24eb48
TH
2357 int rc;
2358
e6484930
TH
2359 if (txq < 1 || txq > dev->num_tx_queues)
2360 return -EINVAL;
f0796d5c 2361
5c56580b
BH
2362 if (dev->reg_state == NETREG_REGISTERED ||
2363 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
2364 ASSERT_RTNL();
2365
1d24eb48
TH
2366 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2367 txq);
bf264145
TH
2368 if (rc)
2369 return rc;
2370
4f57c087
JF
2371 if (dev->num_tc)
2372 netif_setup_tc(dev, txq);
2373
024e9679 2374 if (txq < dev->real_num_tx_queues) {
e6484930 2375 qdisc_reset_all_tx_gt(dev, txq);
024e9679
AD
2376#ifdef CONFIG_XPS
2377 netif_reset_xps_queues_gt(dev, txq);
2378#endif
2379 }
f0796d5c 2380 }
e6484930
TH
2381
2382 dev->real_num_tx_queues = txq;
2383 return 0;
f0796d5c
JF
2384}
2385EXPORT_SYMBOL(netif_set_real_num_tx_queues);
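/*
 * Editorial example (sketch): changing the active TX queue count at
 * runtime from a context that does not already hold rtnl. Once the
 * device is registered, the rtnl lock must be held, matching the
 * ASSERT_RTNL() above.
 */
#if 0 /* example only */
static int example_set_tx_channels(struct net_device *dev, unsigned int txq)
{
	int err;

	rtnl_lock();
	err = netif_set_real_num_tx_queues(dev, txq);
	rtnl_unlock();
	return err;
}
#endif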
56079431 2386
a953be53 2387#ifdef CONFIG_SYSFS
62fe0b40
BH
2388/**
2389 * netif_set_real_num_rx_queues - set actual number of RX queues used
2390 * @dev: Network device
2391 * @rxq: Actual number of RX queues
2392 *
2393 * This must be called either with the rtnl_lock held or before
2394 * registration of the net device. Returns 0 on success, or a
4e7f7951
BH
2395 * negative error code. If called before registration, it always
2396 * succeeds.
62fe0b40
BH
2397 */
2398int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2399{
2400 int rc;
2401
bd25fa7b
TH
2402 if (rxq < 1 || rxq > dev->num_rx_queues)
2403 return -EINVAL;
2404
62fe0b40
BH
2405 if (dev->reg_state == NETREG_REGISTERED) {
2406 ASSERT_RTNL();
2407
62fe0b40
BH
2408 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2409 rxq);
2410 if (rc)
2411 return rc;
62fe0b40
BH
2412 }
2413
2414 dev->real_num_rx_queues = rxq;
2415 return 0;
2416}
2417EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2418#endif
2419
2c53040f
BH
2420/**
2421 * netif_get_num_default_rss_queues - default number of RSS queues
16917b87
YM
2422 *
2423 * This routine should set an upper limit on the number of RSS queues
2424 * used by default by multiqueue devices.
2425 */
a55b138b 2426int netif_get_num_default_rss_queues(void)
16917b87 2427{
40e4e713
HS
2428 return is_kdump_kernel() ?
2429 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
16917b87
YM
2430}
2431EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2432
3bcb846c 2433static void __netif_reschedule(struct Qdisc *q)
56079431 2434{
def82a1d
JP
2435 struct softnet_data *sd;
2436 unsigned long flags;
56079431 2437
def82a1d 2438 local_irq_save(flags);
903ceff7 2439 sd = this_cpu_ptr(&softnet_data);
a9cbd588
CG
2440 q->next_sched = NULL;
2441 *sd->output_queue_tailp = q;
2442 sd->output_queue_tailp = &q->next_sched;
def82a1d
JP
2443 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2444 local_irq_restore(flags);
2445}
2446
2447void __netif_schedule(struct Qdisc *q)
2448{
2449 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2450 __netif_reschedule(q);
56079431
DV
2451}
2452EXPORT_SYMBOL(__netif_schedule);
2453
e6247027
ED
2454struct dev_kfree_skb_cb {
2455 enum skb_free_reason reason;
2456};
2457
2458static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
56079431 2459{
e6247027
ED
2460 return (struct dev_kfree_skb_cb *)skb->cb;
2461}
2462
46e5da40
JF
2463void netif_schedule_queue(struct netdev_queue *txq)
2464{
2465 rcu_read_lock();
2466 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2467 struct Qdisc *q = rcu_dereference(txq->qdisc);
2468
2469 __netif_schedule(q);
2470 }
2471 rcu_read_unlock();
2472}
2473EXPORT_SYMBOL(netif_schedule_queue);
2474
46e5da40
JF
2475void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2476{
2477 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2478 struct Qdisc *q;
2479
2480 rcu_read_lock();
2481 q = rcu_dereference(dev_queue->qdisc);
2482 __netif_schedule(q);
2483 rcu_read_unlock();
2484 }
2485}
2486EXPORT_SYMBOL(netif_tx_wake_queue);
2487
e6247027 2488void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 2489{
e6247027 2490 unsigned long flags;
56079431 2491
9899886d
MJ
2492 if (unlikely(!skb))
2493 return;
2494
63354797 2495 if (likely(refcount_read(&skb->users) == 1)) {
e6247027 2496 smp_rmb();
63354797
RE
2497 refcount_set(&skb->users, 0);
2498 } else if (likely(!refcount_dec_and_test(&skb->users))) {
e6247027 2499 return;
bea3348e 2500 }
e6247027
ED
2501 get_kfree_skb_cb(skb)->reason = reason;
2502 local_irq_save(flags);
2503 skb->next = __this_cpu_read(softnet_data.completion_queue);
2504 __this_cpu_write(softnet_data.completion_queue, skb);
2505 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2506 local_irq_restore(flags);
56079431 2507}
e6247027 2508EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 2509
e6247027 2510void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
56079431
DV
2511{
2512 if (in_irq() || irqs_disabled())
e6247027 2513 __dev_kfree_skb_irq(skb, reason);
56079431
DV
2514 else
2515 dev_kfree_skb(skb);
2516}
e6247027 2517EXPORT_SYMBOL(__dev_kfree_skb_any);
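/*
 * Editorial example (sketch; example_ring/example_ring_pop are
 * hypothetical): a TX completion handler that may run in hard-IRQ
 * context uses the _any variant, which defers the actual free to the
 * NET_TX softirq completion queue when direct freeing is unsafe.
 */
#if 0 /* example only */
static void example_tx_complete(struct example_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = example_ring_pop(ring)))
		__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
#endif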
56079431
DV
2518
2519
bea3348e
SH
2520/**
2521 * netif_device_detach - mark device as removed
2522 * @dev: network device
2523 *
2524 * Mark device as removed from system and therefore no longer available.
2525 */
56079431
DV
2526void netif_device_detach(struct net_device *dev)
2527{
2528 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2529 netif_running(dev)) {
d543103a 2530 netif_tx_stop_all_queues(dev);
56079431
DV
2531 }
2532}
2533EXPORT_SYMBOL(netif_device_detach);
2534
bea3348e
SH
2535/**
2536 * netif_device_attach - mark device as attached
2537 * @dev: network device
2538 *
2539 * Mark the device as attached to the system and restart it if needed.
2540 */
56079431
DV
2541void netif_device_attach(struct net_device *dev)
2542{
2543 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2544 netif_running(dev)) {
d543103a 2545 netif_tx_wake_all_queues(dev);
4ec93edb 2546 __netdev_watchdog_up(dev);
56079431
DV
2547 }
2548}
2549EXPORT_SYMBOL(netif_device_attach);
2550
5605c762
JP
2551/*
2552 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
2553 * to be used as a distribution range.
2554 */
2555u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2556 unsigned int num_tx_queues)
2557{
2558 u32 hash;
2559 u16 qoffset = 0;
2560 u16 qcount = num_tx_queues;
2561
2562 if (skb_rx_queue_recorded(skb)) {
2563 hash = skb_get_rx_queue(skb);
2564 while (unlikely(hash >= num_tx_queues))
2565 hash -= num_tx_queues;
2566 return hash;
2567 }
2568
2569 if (dev->num_tc) {
2570 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
f4563a75 2571
5605c762
JP
2572 qoffset = dev->tc_to_txq[tc].offset;
2573 qcount = dev->tc_to_txq[tc].count;
2574 }
2575
2576 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2577}
2578EXPORT_SYMBOL(__skb_tx_hash);
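/*
 * Editorial note (worked example): reciprocal_scale(), from
 * linux/kernel.h, maps a 32-bit hash into [0, qcount) without a modulo:
 *
 *	reciprocal_scale(h, qcount) == (u32)(((u64)h * qcount) >> 32)
 *
 * e.g. for qcount = 4 and skb_get_hash() == 0xC0000000 (3/4 of the
 * 32-bit range), the result is (0xC0000000ULL * 4) >> 32 = 3, to which
 * the qoffset of the selected traffic class is then added.
 */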
2579
36c92474
BH
2580static void skb_warn_bad_offload(const struct sk_buff *skb)
2581{
84d15ae5 2582 static const netdev_features_t null_features;
36c92474 2583 struct net_device *dev = skb->dev;
88ad4175 2584 const char *name = "";
36c92474 2585
c846ad9b
BG
2586 if (!net_ratelimit())
2587 return;
2588
88ad4175
BM
2589 if (dev) {
2590 if (dev->dev.parent)
2591 name = dev_driver_string(dev->dev.parent);
2592 else
2593 name = netdev_name(dev);
2594 }
36c92474
BH
2595 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2596 "gso_type=%d ip_summed=%d\n",
88ad4175 2597 name, dev ? &dev->features : &null_features,
65e9d2fa 2598 skb->sk ? &skb->sk->sk_route_caps : &null_features,
36c92474
BH
2599 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2600 skb_shinfo(skb)->gso_type, skb->ip_summed);
2601}
2602
1da177e4
LT
2603/*
2604 * Invalidate the hardware checksum when the packet is to be mangled, and
2605 * complete the checksum manually on the outgoing path.
2606 */
84fa7933 2607int skb_checksum_help(struct sk_buff *skb)
1da177e4 2608{
d3bc23e7 2609 __wsum csum;
663ead3b 2610 int ret = 0, offset;
1da177e4 2611
84fa7933 2612 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
2613 goto out_set_summed;
2614
2615 if (unlikely(skb_shinfo(skb)->gso_size)) {
36c92474
BH
2616 skb_warn_bad_offload(skb);
2617 return -EINVAL;
1da177e4
LT
2618 }
2619
cef401de
ED
2620 /* Before computing a checksum, we should make sure no frag could
2621 * be modified by an external entity : checksum could be wrong.
2622 */
2623 if (skb_has_shared_frag(skb)) {
2624 ret = __skb_linearize(skb);
2625 if (ret)
2626 goto out;
2627 }
2628
55508d60 2629 offset = skb_checksum_start_offset(skb);
a030847e
HX
2630 BUG_ON(offset >= skb_headlen(skb));
2631 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2632
2633 offset += skb->csum_offset;
2634 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2635
2636 if (skb_cloned(skb) &&
2637 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
2638 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2639 if (ret)
2640 goto out;
2641 }
2642
4f2e4ad5 2643 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
a430a43d 2644out_set_summed:
1da177e4 2645 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 2646out:
1da177e4
LT
2647 return ret;
2648}
d1b19dff 2649EXPORT_SYMBOL(skb_checksum_help);
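/*
 * Editorial example (sketch): the classic driver-side fallback. A
 * hypothetical ndo_start_xmit whose hardware can only checksum IPv4
 * packets resolves CHECKSUM_PARTIAL in software for everything else.
 */
#if 0 /* example only */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    skb->protocol != htons(ETH_P_IP) &&
	    skb_checksum_help(skb))
		goto drop;

	/* ... hand the now fully checksummed skb to the hardware ... */
	return NETDEV_TX_OK;
drop:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
#endif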
1da177e4 2650
b72b5bf6
DC
2651int skb_crc32c_csum_help(struct sk_buff *skb)
2652{
2653 __le32 crc32c_csum;
2654 int ret = 0, offset, start;
2655
2656 if (skb->ip_summed != CHECKSUM_PARTIAL)
2657 goto out;
2658
2659 if (unlikely(skb_is_gso(skb)))
2660 goto out;
2661
2662 /* Before computing a checksum, we should make sure no frag could
2663 * be modified by an external entity : checksum could be wrong.
2664 */
2665 if (unlikely(skb_has_shared_frag(skb))) {
2666 ret = __skb_linearize(skb);
2667 if (ret)
2668 goto out;
2669 }
2670 start = skb_checksum_start_offset(skb);
2671 offset = start + offsetof(struct sctphdr, checksum);
2672 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
2673 ret = -EINVAL;
2674 goto out;
2675 }
2676 if (skb_cloned(skb) &&
2677 !skb_clone_writable(skb, offset + sizeof(__le32))) {
2678 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2679 if (ret)
2680 goto out;
2681 }
2682 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
2683 skb->len - start, ~(__u32)0,
2684 crc32c_csum_stub));
2685 *(__le32 *)(skb->data + offset) = crc32c_csum;
2686 skb->ip_summed = CHECKSUM_NONE;
dba00306 2687 skb->csum_not_inet = 0;
b72b5bf6
DC
2688out:
2689 return ret;
2690}
2691
53d6471c 2692__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
f6a78bfc 2693{
252e3346 2694 __be16 type = skb->protocol;
f6a78bfc 2695
19acc327
PS
2696 /* Tunnel gso handlers can set protocol to ethernet. */
2697 if (type == htons(ETH_P_TEB)) {
2698 struct ethhdr *eth;
2699
2700 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2701 return 0;
2702
2703 eth = (struct ethhdr *)skb_mac_header(skb);
2704 type = eth->h_proto;
2705 }
2706
d4bcef3f 2707 return __vlan_get_protocol(skb, type, depth);
ec5f0615
PS
2708}
2709
2710/**
2711 * skb_mac_gso_segment - mac layer segmentation handler.
2712 * @skb: buffer to segment
2713 * @features: features for the output path (see dev->features)
2714 */
2715struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2716 netdev_features_t features)
2717{
2718 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2719 struct packet_offload *ptype;
53d6471c
VY
2720 int vlan_depth = skb->mac_len;
2721 __be16 type = skb_network_protocol(skb, &vlan_depth);
ec5f0615
PS
2722
2723 if (unlikely(!type))
2724 return ERR_PTR(-EINVAL);
2725
53d6471c 2726 __skb_pull(skb, vlan_depth);
f6a78bfc
HX
2727
2728 rcu_read_lock();
22061d80 2729 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 2730 if (ptype->type == type && ptype->callbacks.gso_segment) {
f191a1d1 2731 segs = ptype->callbacks.gso_segment(skb, features);
f6a78bfc
HX
2732 break;
2733 }
2734 }
2735 rcu_read_unlock();
2736
98e399f8 2737 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 2738
f6a78bfc
HX
2739 return segs;
2740}
05e8ef4a
PS
2741EXPORT_SYMBOL(skb_mac_gso_segment);
2742
2743
2744/* openvswitch calls this on rx path, so we need a different check.
2745 */
2746static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2747{
2748 if (tx_path)
93991221 2749 return skb->ip_summed != CHECKSUM_PARTIAL;
6e7bc478
ED
2750
2751 return skb->ip_summed == CHECKSUM_NONE;
05e8ef4a
PS
2752}
2753
2754/**
2755 * __skb_gso_segment - Perform segmentation on skb.
2756 * @skb: buffer to segment
2757 * @features: features for the output path (see dev->features)
2758 * @tx_path: whether it is called in TX path
2759 *
2760 * This function segments the given skb and returns a list of segments.
2761 *
2762 * It may return NULL if the skb requires no segmentation. This is
2763 * only possible when GSO is used for verifying header integrity.
9207f9d4
KK
2764 *
2765 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
05e8ef4a
PS
2766 */
2767struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2768 netdev_features_t features, bool tx_path)
2769{
b2504a5d
ED
2770 struct sk_buff *segs;
2771
05e8ef4a
PS
2772 if (unlikely(skb_needs_check(skb, tx_path))) {
2773 int err;
2774
b2504a5d 2775 /* We're going to init ->check field in TCP or UDP header */
a40e0a66 2776 err = skb_cow_head(skb, 0);
2777 if (err < 0)
05e8ef4a
PS
2778 return ERR_PTR(err);
2779 }
2780
802ab55a
AD
2781 /* Only report GSO partial support if it will enable us to
2782 * support segmentation on this frame without needing additional
2783 * work.
2784 */
2785 if (features & NETIF_F_GSO_PARTIAL) {
2786 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
2787 struct net_device *dev = skb->dev;
2788
2789 partial_features |= dev->features & dev->gso_partial_features;
2790 if (!skb_gso_ok(skb, features | partial_features))
2791 features &= ~NETIF_F_GSO_PARTIAL;
2792 }
2793
9207f9d4
KK
2794 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2795 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2796
68c33163 2797 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3347c960
ED
2798 SKB_GSO_CB(skb)->encap_level = 0;
2799
05e8ef4a
PS
2800 skb_reset_mac_header(skb);
2801 skb_reset_mac_len(skb);
2802
b2504a5d
ED
2803 segs = skb_mac_gso_segment(skb, features);
2804
2805 if (unlikely(skb_needs_check(skb, tx_path)))
2806 skb_warn_bad_offload(skb);
2807
2808 return segs;
05e8ef4a 2809}
12b0004d 2810EXPORT_SYMBOL(__skb_gso_segment);
f6a78bfc 2811
fb286bb2
HX
2812/* Take action when hardware reception checksum errors are detected. */
2813#ifdef CONFIG_BUG
2814void netdev_rx_csum_fault(struct net_device *dev)
2815{
2816 if (net_ratelimit()) {
7b6cd1ce 2817 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
fb286bb2
HX
2818 dump_stack();
2819 }
2820}
2821EXPORT_SYMBOL(netdev_rx_csum_fault);
2822#endif
2823
1da177e4
LT
2824 /* Actually, we should eliminate this check as soon as we know that:
2825 * 1. An IOMMU is present and allows mapping all the memory.
2826 * 2. No high memory really exists on this machine.
2827 */
2828
c1e756bf 2829static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 2830{
3d3a8533 2831#ifdef CONFIG_HIGHMEM
1da177e4 2832 int i;
f4563a75 2833
5acbbd42 2834 if (!(dev->features & NETIF_F_HIGHDMA)) {
ea2ab693
IC
2835 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2836 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
f4563a75 2837
ea2ab693 2838 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 2839 return 1;
ea2ab693 2840 }
5acbbd42 2841 }
1da177e4 2842
5acbbd42
FT
2843 if (PCI_DMA_BUS_IS_PHYS) {
2844 struct device *pdev = dev->dev.parent;
1da177e4 2845
9092c658
ED
2846 if (!pdev)
2847 return 0;
5acbbd42 2848 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
ea2ab693
IC
2849 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2850 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
f4563a75 2851
5acbbd42
FT
2852 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2853 return 1;
2854 }
2855 }
3d3a8533 2856#endif
1da177e4
LT
2857 return 0;
2858}
1da177e4 2859
3b392ddb
SH
2860/* If MPLS offload request, verify we are testing hardware MPLS features
2861 * instead of standard features for the netdev.
2862 */
d0edc7bf 2863#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3b392ddb
SH
2864static netdev_features_t net_mpls_features(struct sk_buff *skb,
2865 netdev_features_t features,
2866 __be16 type)
2867{
25cd9ba0 2868 if (eth_p_mpls(type))
3b392ddb
SH
2869 features &= skb->dev->mpls_features;
2870
2871 return features;
2872}
2873#else
2874static netdev_features_t net_mpls_features(struct sk_buff *skb,
2875 netdev_features_t features,
2876 __be16 type)
2877{
2878 return features;
2879}
2880#endif
2881
c8f44aff 2882static netdev_features_t harmonize_features(struct sk_buff *skb,
c1e756bf 2883 netdev_features_t features)
f01a5236 2884{
53d6471c 2885 int tmp;
3b392ddb
SH
2886 __be16 type;
2887
2888 type = skb_network_protocol(skb, &tmp);
2889 features = net_mpls_features(skb, features, type);
53d6471c 2890
c0d680e5 2891 if (skb->ip_summed != CHECKSUM_NONE &&
3b392ddb 2892 !can_checksum_protocol(features, type)) {
996e8021 2893 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
f01a5236 2894 }
7be2c82c
ED
2895 if (illegal_highdma(skb->dev, skb))
2896 features &= ~NETIF_F_SG;
f01a5236
JG
2897
2898 return features;
2899}
2900
e38f3025
TM
2901netdev_features_t passthru_features_check(struct sk_buff *skb,
2902 struct net_device *dev,
2903 netdev_features_t features)
2904{
2905 return features;
2906}
2907EXPORT_SYMBOL(passthru_features_check);
2908
8cb65d00
TM
2909static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2910 struct net_device *dev,
2911 netdev_features_t features)
2912{
2913 return vlan_features_check(skb, features);
2914}
2915
cbc53e08
AD
2916static netdev_features_t gso_features_check(const struct sk_buff *skb,
2917 struct net_device *dev,
2918 netdev_features_t features)
2919{
2920 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2921
2922 if (gso_segs > dev->gso_max_segs)
2923 return features & ~NETIF_F_GSO_MASK;
2924
802ab55a
AD
2925 /* Support for GSO partial features requires software
2926 * intervention before we can actually process the packets
2927 * so we need to strip support for any partial features now
2928 * and we can pull them back in after we have partially
2929 * segmented the frame.
2930 */
2931 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
2932 features &= ~dev->gso_partial_features;
2933
2934 /* Make sure to clear the IPv4 ID mangling feature if the
2935 * IPv4 header has the potential to be fragmented.
cbc53e08
AD
2936 */
2937 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
2938 struct iphdr *iph = skb->encapsulation ?
2939 inner_ip_hdr(skb) : ip_hdr(skb);
2940
2941 if (!(iph->frag_off & htons(IP_DF)))
2942 features &= ~NETIF_F_TSO_MANGLEID;
2943 }
2944
2945 return features;
2946}
2947
c1e756bf 2948netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6 2949{
5f35227e 2950 struct net_device *dev = skb->dev;
fcbeb976 2951 netdev_features_t features = dev->features;
58e998c6 2952
cbc53e08
AD
2953 if (skb_is_gso(skb))
2954 features = gso_features_check(skb, dev, features);
30b678d8 2955
5f35227e
JG
2956 /* If encapsulation offload request, verify we are testing
2957 * hardware encapsulation features instead of standard
2958 * features for the netdev
2959 */
2960 if (skb->encapsulation)
2961 features &= dev->hw_enc_features;
2962
f5a7fb88
TM
2963 if (skb_vlan_tagged(skb))
2964 features = netdev_intersect_features(features,
2965 dev->vlan_features |
2966 NETIF_F_HW_VLAN_CTAG_TX |
2967 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 2968
5f35227e
JG
2969 if (dev->netdev_ops->ndo_features_check)
2970 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2971 features);
8cb65d00
TM
2972 else
2973 features &= dflt_features_check(skb, dev, features);
5f35227e 2974
c1e756bf 2975 return harmonize_features(skb, features);
58e998c6 2976}
c1e756bf 2977EXPORT_SYMBOL(netif_skb_features);
58e998c6 2978
2ea25513 2979static int xmit_one(struct sk_buff *skb, struct net_device *dev,
95f6b3dd 2980 struct netdev_queue *txq, bool more)
f6a78bfc 2981{
2ea25513
DM
2982 unsigned int len;
2983 int rc;
00829823 2984
7866a621 2985 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2ea25513 2986 dev_queue_xmit_nit(skb, dev);
fc741216 2987
2ea25513
DM
2988 len = skb->len;
2989 trace_net_dev_start_xmit(skb, dev);
95f6b3dd 2990 rc = netdev_start_xmit(skb, dev, txq, more);
2ea25513 2991 trace_net_dev_xmit(skb, rc, dev, len);
adf30907 2992
2ea25513
DM
2993 return rc;
2994}
7b9c6090 2995
8dcda22a
DM
2996struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2997 struct netdev_queue *txq, int *ret)
7f2e870f
DM
2998{
2999 struct sk_buff *skb = first;
3000 int rc = NETDEV_TX_OK;
7b9c6090 3001
7f2e870f
DM
3002 while (skb) {
3003 struct sk_buff *next = skb->next;
fc70fb64 3004
7f2e870f 3005 skb->next = NULL;
95f6b3dd 3006 rc = xmit_one(skb, dev, txq, next != NULL);
7f2e870f
DM
3007 if (unlikely(!dev_xmit_complete(rc))) {
3008 skb->next = next;
3009 goto out;
3010 }
6afff0ca 3011
7f2e870f
DM
3012 skb = next;
3013 if (netif_xmit_stopped(txq) && skb) {
3014 rc = NETDEV_TX_BUSY;
3015 break;
9ccb8975 3016 }
7f2e870f 3017 }
9ccb8975 3018
7f2e870f
DM
3019out:
3020 *ret = rc;
3021 return skb;
3022}
b40863c6 3023
1ff0dc94
ED
3024static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3025 netdev_features_t features)
f6a78bfc 3026{
df8a39de 3027 if (skb_vlan_tag_present(skb) &&
5968250c
JP
3028 !vlan_hw_offload_capable(features, skb->vlan_proto))
3029 skb = __vlan_hwaccel_push_inside(skb);
eae3f88e
DM
3030 return skb;
3031}
f6a78bfc 3032
43c26a1a
DC
3033int skb_csum_hwoffload_help(struct sk_buff *skb,
3034 const netdev_features_t features)
3035{
3036 if (unlikely(skb->csum_not_inet))
3037 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3038 skb_crc32c_csum_help(skb);
3039
3040 return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3041}
3042EXPORT_SYMBOL(skb_csum_hwoffload_help);
3043
55a93b3e 3044static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
eae3f88e
DM
3045{
3046 netdev_features_t features;
f6a78bfc 3047
eae3f88e
DM
3048 features = netif_skb_features(skb);
3049 skb = validate_xmit_vlan(skb, features);
3050 if (unlikely(!skb))
3051 goto out_null;
7b9c6090 3052
8b86a61d 3053 if (netif_needs_gso(skb, features)) {
ce93718f
DM
3054 struct sk_buff *segs;
3055
3056 segs = skb_gso_segment(skb, features);
cecda693 3057 if (IS_ERR(segs)) {
af6dabc9 3058 goto out_kfree_skb;
cecda693
JW
3059 } else if (segs) {
3060 consume_skb(skb);
3061 skb = segs;
f6a78bfc 3062 }
eae3f88e
DM
3063 } else {
3064 if (skb_needs_linearize(skb, features) &&
3065 __skb_linearize(skb))
3066 goto out_kfree_skb;
4ec93edb 3067
f6e27114
SK
3068 if (validate_xmit_xfrm(skb, features))
3069 goto out_kfree_skb;
3070
eae3f88e
DM
3071 /* If packet is not checksummed and device does not
3072 * support checksumming for this protocol, complete
3073 * checksumming here.
3074 */
3075 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3076 if (skb->encapsulation)
3077 skb_set_inner_transport_header(skb,
3078 skb_checksum_start_offset(skb));
3079 else
3080 skb_set_transport_header(skb,
3081 skb_checksum_start_offset(skb));
43c26a1a 3082 if (skb_csum_hwoffload_help(skb, features))
eae3f88e 3083 goto out_kfree_skb;
7b9c6090 3084 }
0c772159 3085 }
7b9c6090 3086
eae3f88e 3087 return skb;
fc70fb64 3088
f6a78bfc
HX
3089out_kfree_skb:
3090 kfree_skb(skb);
eae3f88e 3091out_null:
d21fd63e 3092 atomic_long_inc(&dev->tx_dropped);
eae3f88e
DM
3093 return NULL;
3094}
6afff0ca 3095
55a93b3e
ED
3096struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
3097{
3098 struct sk_buff *next, *head = NULL, *tail;
3099
bec3cfdc 3100 for (; skb != NULL; skb = next) {
55a93b3e
ED
3101 next = skb->next;
3102 skb->next = NULL;
bec3cfdc
ED
3103
3104 /* in case the skb won't be segmented, point it at itself */
3105 skb->prev = skb;
3106
55a93b3e 3107 skb = validate_xmit_skb(skb, dev);
bec3cfdc
ED
3108 if (!skb)
3109 continue;
55a93b3e 3110
bec3cfdc
ED
3111 if (!head)
3112 head = skb;
3113 else
3114 tail->next = skb;
3115 /* If skb was segmented, skb->prev points to
3116 * the last segment. If not, it still contains skb.
3117 */
3118 tail = skb->prev;
55a93b3e
ED
3119 }
3120 return head;
f6a78bfc 3121}
104ba78c 3122EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
f6a78bfc 3123
1def9238
ED
3124static void qdisc_pkt_len_init(struct sk_buff *skb)
3125{
3126 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3127
3128 qdisc_skb_cb(skb)->pkt_len = skb->len;
3129
3130 /* To get more precise estimation of bytes sent on wire,
3131 * we add to pkt_len the headers size of all segments
3132 */
3133 if (shinfo->gso_size) {
757b8b1d 3134 unsigned int hdr_len;
15e5a030 3135 u16 gso_segs = shinfo->gso_segs;
1def9238 3136
757b8b1d
ED
3137 /* mac layer + network layer */
3138 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3139
3140 /* + transport layer */
1def9238
ED
3141 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3142 hdr_len += tcp_hdrlen(skb);
3143 else
3144 hdr_len += sizeof(struct udphdr);
15e5a030
JW
3145
3146 if (shinfo->gso_type & SKB_GSO_DODGY)
3147 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3148 shinfo->gso_size);
3149
3150 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
1def9238
ED
3151 }
3152}
3153
bbd8a0d3
KK
3154static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3155 struct net_device *dev,
3156 struct netdev_queue *txq)
3157{
3158 spinlock_t *root_lock = qdisc_lock(q);
520ac30f 3159 struct sk_buff *to_free = NULL;
a2da570d 3160 bool contended;
bbd8a0d3
KK
3161 int rc;
3162
a2da570d 3163 qdisc_calculate_pkt_len(skb, q);
79640a4c
ED
3164 /*
3165 * Heuristic to force contended enqueues to serialize on a
3166 * separate lock before trying to get qdisc main lock.
f9eb8aea 3167 * This permits qdisc->running owner to get the lock more
9bf2b8c2 3168 * often and dequeue packets faster.
79640a4c 3169 */
a2da570d 3170 contended = qdisc_is_running(q);
79640a4c
ED
3171 if (unlikely(contended))
3172 spin_lock(&q->busylock);
3173
bbd8a0d3
KK
3174 spin_lock(root_lock);
3175 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
520ac30f 3176 __qdisc_drop(skb, &to_free);
bbd8a0d3
KK
3177 rc = NET_XMIT_DROP;
3178 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 3179 qdisc_run_begin(q)) {
bbd8a0d3
KK
3180 /*
3181 * This is a work-conserving queue; there are no old skbs
3182 * waiting to be sent out; and the qdisc is not running -
3183 * xmit the skb directly.
3184 */
bfe0d029 3185
bfe0d029
ED
3186 qdisc_bstats_update(q, skb);
3187
55a93b3e 3188 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
79640a4c
ED
3189 if (unlikely(contended)) {
3190 spin_unlock(&q->busylock);
3191 contended = false;
3192 }
bbd8a0d3 3193 __qdisc_run(q);
79640a4c 3194 } else
bc135b23 3195 qdisc_run_end(q);
bbd8a0d3
KK
3196
3197 rc = NET_XMIT_SUCCESS;
3198 } else {
520ac30f 3199 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
79640a4c
ED
3200 if (qdisc_run_begin(q)) {
3201 if (unlikely(contended)) {
3202 spin_unlock(&q->busylock);
3203 contended = false;
3204 }
3205 __qdisc_run(q);
3206 }
bbd8a0d3
KK
3207 }
3208 spin_unlock(root_lock);
520ac30f
ED
3209 if (unlikely(to_free))
3210 kfree_skb_list(to_free);
79640a4c
ED
3211 if (unlikely(contended))
3212 spin_unlock(&q->busylock);
bbd8a0d3
KK
3213 return rc;
3214}
3215
86f8515f 3216#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
5bc1421e
NH
3217static void skb_update_prio(struct sk_buff *skb)
3218{
6977a79d 3219 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
5bc1421e 3220
91c68ce2 3221 if (!skb->priority && skb->sk && map) {
2a56a1fe
TH
3222 unsigned int prioidx =
3223 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
91c68ce2
ED
3224
3225 if (prioidx < map->priomap_len)
3226 skb->priority = map->priomap[prioidx];
3227 }
5bc1421e
NH
3228}
3229#else
3230#define skb_update_prio(skb)
3231#endif
3232
f60e5990 3233DEFINE_PER_CPU(int, xmit_recursion);
3234EXPORT_SYMBOL(xmit_recursion);
3235
95603e22
MM
3236/**
3237 * dev_loopback_xmit - loop back @skb
0c4b51f0
EB
3238 * @net: network namespace this loopback is happening in
3239 * @sk: sk needed to be a netfilter okfn
95603e22
MM
3240 * @skb: buffer to transmit
3241 */
0c4b51f0 3242int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
95603e22
MM
3243{
3244 skb_reset_mac_header(skb);
3245 __skb_pull(skb, skb_network_offset(skb));
3246 skb->pkt_type = PACKET_LOOPBACK;
3247 skb->ip_summed = CHECKSUM_UNNECESSARY;
3248 WARN_ON(!skb_dst(skb));
3249 skb_dst_force(skb);
3250 netif_rx_ni(skb);
3251 return 0;
3252}
3253EXPORT_SYMBOL(dev_loopback_xmit);
3254
1f211a1b
DB
3255#ifdef CONFIG_NET_EGRESS
3256static struct sk_buff *
3257sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3258{
46209401 3259 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
1f211a1b
DB
3260 struct tcf_result cl_res;
3261
46209401 3262 if (!miniq)
1f211a1b
DB
3263 return skb;
3264
8dc07fdb 3265 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
46209401 3266 mini_qdisc_bstats_cpu_update(miniq, skb);
1f211a1b 3267
46209401 3268 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
1f211a1b
DB
3269 case TC_ACT_OK:
3270 case TC_ACT_RECLASSIFY:
3271 skb->tc_index = TC_H_MIN(cl_res.classid);
3272 break;
3273 case TC_ACT_SHOT:
46209401 3274 mini_qdisc_qstats_cpu_drop(miniq);
1f211a1b 3275 *ret = NET_XMIT_DROP;
7e2c3aea
DB
3276 kfree_skb(skb);
3277 return NULL;
1f211a1b
DB
3278 case TC_ACT_STOLEN:
3279 case TC_ACT_QUEUED:
e25ea21f 3280 case TC_ACT_TRAP:
1f211a1b 3281 *ret = NET_XMIT_SUCCESS;
7e2c3aea 3282 consume_skb(skb);
1f211a1b
DB
3283 return NULL;
3284 case TC_ACT_REDIRECT:
3285 /* No need to push/pop skb's mac_header here on egress! */
3286 skb_do_redirect(skb);
3287 *ret = NET_XMIT_SUCCESS;
3288 return NULL;
3289 default:
3290 break;
3291 }
3292
3293 return skb;
3294}
3295#endif /* CONFIG_NET_EGRESS */
3296
638b2a69
JP
3297static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3298{
3299#ifdef CONFIG_XPS
3300 struct xps_dev_maps *dev_maps;
3301 struct xps_map *map;
3302 int queue_index = -1;
3303
3304 rcu_read_lock();
3305 dev_maps = rcu_dereference(dev->xps_maps);
3306 if (dev_maps) {
184c449f
AD
3307 unsigned int tci = skb->sender_cpu - 1;
3308
3309 if (dev->num_tc) {
3310 tci *= dev->num_tc;
3311 tci += netdev_get_prio_tc_map(dev, skb->priority);
3312 }
3313
3314 map = rcu_dereference(dev_maps->cpu_map[tci]);
638b2a69
JP
3315 if (map) {
3316 if (map->len == 1)
3317 queue_index = map->queues[0];
3318 else
3319 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3320 map->len)];
3321 if (unlikely(queue_index >= dev->real_num_tx_queues))
3322 queue_index = -1;
3323 }
3324 }
3325 rcu_read_unlock();
3326
3327 return queue_index;
3328#else
3329 return -1;
3330#endif
3331}
3332
3333static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3334{
3335 struct sock *sk = skb->sk;
3336 int queue_index = sk_tx_queue_get(sk);
3337
3338 if (queue_index < 0 || skb->ooo_okay ||
3339 queue_index >= dev->real_num_tx_queues) {
3340 int new_index = get_xps_queue(dev, skb);
f4563a75 3341
638b2a69
JP
3342 if (new_index < 0)
3343 new_index = skb_tx_hash(dev, skb);
3344
3345 if (queue_index != new_index && sk &&
004a5d01 3346 sk_fullsock(sk) &&
638b2a69
JP
3347 rcu_access_pointer(sk->sk_dst_cache))
3348 sk_tx_queue_set(sk, new_index);
3349
3350 queue_index = new_index;
3351 }
3352
3353 return queue_index;
3354}
3355
3356struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3357 struct sk_buff *skb,
3358 void *accel_priv)
3359{
3360 int queue_index = 0;
3361
3362#ifdef CONFIG_XPS
52bd2d62
ED
3363 u32 sender_cpu = skb->sender_cpu - 1;
3364
3365 if (sender_cpu >= (u32)NR_CPUS)
638b2a69
JP
3366 skb->sender_cpu = raw_smp_processor_id() + 1;
3367#endif
3368
3369 if (dev->real_num_tx_queues != 1) {
3370 const struct net_device_ops *ops = dev->netdev_ops;
f4563a75 3371
638b2a69
JP
3372 if (ops->ndo_select_queue)
3373 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3374 __netdev_pick_tx);
3375 else
3376 queue_index = __netdev_pick_tx(dev, skb);
3377
3378 if (!accel_priv)
3379 queue_index = netdev_cap_txqueue(dev, queue_index);
3380 }
3381
3382 skb_set_queue_mapping(skb, queue_index);
3383 return netdev_get_tx_queue(dev, queue_index);
3384}
3385
d29f749e 3386/**
9d08dd3d 3387 * __dev_queue_xmit - transmit a buffer
d29f749e 3388 * @skb: buffer to transmit
9d08dd3d 3389 * @accel_priv: private data used for L2 forwarding offload
d29f749e
DJ
3390 *
3391 * Queue a buffer for transmission to a network device. The caller must
3392 * have set the device and priority and built the buffer before calling
3393 * this function. The function can be called from an interrupt.
3394 *
3395 * A negative errno code is returned on a failure. A success does not
3396 * guarantee the frame will be transmitted as it may be dropped due
3397 * to congestion or traffic shaping.
3398 *
3399 * -----------------------------------------------------------------------------------
3400 * I notice this method can also return errors from the queue disciplines,
3401 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3402 * be positive.
3403 *
3404 * Regardless of the return value, the skb is consumed, so it is currently
3405 * difficult to retry a send to this method. (You can bump the ref count
3406 * before sending to hold a reference for retry if you are careful.)
3407 *
3408 * When calling this method, interrupts MUST be enabled. This is because
3409 * the BH enable code must have IRQs enabled so that it will not deadlock.
3410 * --BLG
3411 */
0a59f3a9 3412static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
1da177e4
LT
3413{
3414 struct net_device *dev = skb->dev;
dc2b4847 3415 struct netdev_queue *txq;
1da177e4
LT
3416 struct Qdisc *q;
3417 int rc = -ENOMEM;
3418
6d1ccff6
ED
3419 skb_reset_mac_header(skb);
3420
e7fd2885
WB
3421 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3422 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3423
4ec93edb
YH
3424 /* Disable soft irqs for various locks below. Also
3425 * stops preemption for RCU.
1da177e4 3426 */
4ec93edb 3427 rcu_read_lock_bh();
1da177e4 3428
5bc1421e
NH
3429 skb_update_prio(skb);
3430
1f211a1b
DB
3431 qdisc_pkt_len_init(skb);
3432#ifdef CONFIG_NET_CLS_ACT
8dc07fdb 3433 skb->tc_at_ingress = 0;
1f211a1b
DB
3434# ifdef CONFIG_NET_EGRESS
3435 if (static_key_false(&egress_needed)) {
3436 skb = sch_handle_egress(skb, &rc, dev);
3437 if (!skb)
3438 goto out;
3439 }
3440# endif
3441#endif
02875878
ED
3442 /* If the device/qdisc doesn't need skb->dst, release it right now while
3443 * it's hot in this CPU's cache.
3444 */
3445 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3446 skb_dst_drop(skb);
3447 else
3448 skb_dst_force(skb);
3449
f663dd9a 3450 txq = netdev_pick_tx(dev, skb, accel_priv);
a898def2 3451 q = rcu_dereference_bh(txq->qdisc);
37437bb2 3452
cf66ba58 3453 trace_net_dev_queue(skb);
1da177e4 3454 if (q->enqueue) {
bbd8a0d3 3455 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 3456 goto out;
1da177e4
LT
3457 }
3458
3459 /* The device has no queue. Common case for software devices:
eb13da1a 3460 * loopback, all sorts of tunnels...
1da177e4 3461
eb13da1a 3462 * Really, it is unlikely that netif_tx_lock protection is necessary
3463 * here. (f.e. loopback and IP tunnels are clean, ignoring statistics
3464 * counters.)
3465 * However, it is possible that they rely on the protection
3466 * provided by us here.
1da177e4 3467
eb13da1a 3468 * Check this and take the lock. It is not prone to deadlocks.
3469 * Or shoot the noqueue qdisc entirely, which is even simpler 8)
1da177e4
LT
3470 */
3471 if (dev->flags & IFF_UP) {
3472 int cpu = smp_processor_id(); /* ok because BHs are off */
3473
c773e847 3474 if (txq->xmit_lock_owner != cpu) {
a70b506e
DB
3475 if (unlikely(__this_cpu_read(xmit_recursion) >
3476 XMIT_RECURSION_LIMIT))
745e20f1
ED
3477 goto recursion_alert;
3478
1f59533f
JDB
3479 skb = validate_xmit_skb(skb, dev);
3480 if (!skb)
d21fd63e 3481 goto out;
1f59533f 3482
c773e847 3483 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 3484
73466498 3485 if (!netif_xmit_stopped(txq)) {
745e20f1 3486 __this_cpu_inc(xmit_recursion);
ce93718f 3487 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
745e20f1 3488 __this_cpu_dec(xmit_recursion);
572a9d7b 3489 if (dev_xmit_complete(rc)) {
c773e847 3490 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
3491 goto out;
3492 }
3493 }
c773e847 3494 HARD_TX_UNLOCK(dev, txq);
e87cc472
JP
3495 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3496 dev->name);
1da177e4
LT
3497 } else {
3498 /* Recursion is detected! It is possible,
745e20f1
ED
3499 * unfortunately
3500 */
3501recursion_alert:
e87cc472
JP
3502 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3503 dev->name);
1da177e4
LT
3504 }
3505 }
3506
3507 rc = -ENETDOWN;
d4828d85 3508 rcu_read_unlock_bh();
1da177e4 3509
015f0688 3510 atomic_long_inc(&dev->tx_dropped);
1f59533f 3511 kfree_skb_list(skb);
1da177e4
LT
3512 return rc;
3513out:
d4828d85 3514 rcu_read_unlock_bh();
1da177e4
LT
3515 return rc;
3516}
f663dd9a 3517
2b4aa3ce 3518int dev_queue_xmit(struct sk_buff *skb)
f663dd9a
JW
3519{
3520 return __dev_queue_xmit(skb, NULL);
3521}
2b4aa3ce 3522EXPORT_SYMBOL(dev_queue_xmit);
1da177e4 3523
f663dd9a
JW
3524int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3525{
3526 return __dev_queue_xmit(skb, accel_priv);
3527}
3528EXPORT_SYMBOL(dev_queue_xmit_accel);
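
/* Illustrative sketch, not part of the original source: roughly how a
 * caller hands a fully built frame to dev_queue_xmit(). The function,
 * destination MAC parameter and IPv4 payload are hypothetical; the usual
 * <linux/netdevice.h>, <linux/skbuff.h> and <linux/etherdevice.h> headers
 * are assumed. Note that the skb is consumed whether or not the call
 * succeeds, so the caller must not touch it afterwards.
 */
static int example_xmit(struct net_device *dev, const unsigned char *dst_mac,
			const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for the L2 header */
	skb_reset_network_header(skb);
	skb_put_data(skb, payload, len);		/* copy the payload in */
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);		/* assumption: IPv4 payload */

	/* Prepend the link-layer header (Ethernet on most devices). */
	if (dev_hard_header(skb, dev, ETH_P_IP, dst_mac, NULL, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* May return a negative errno or a positive NET_XMIT_* code. */
	return dev_queue_xmit(skb);
}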
3529
1da177e4 3530
eb13da1a 3531/*************************************************************************
3532 * Receiver routines
3533 *************************************************************************/
1da177e4 3534
6b2bedc3 3535int netdev_max_backlog __read_mostly = 1000;
c9e6bc64
ED
3536EXPORT_SYMBOL(netdev_max_backlog);
3537
3b098e2d 3538int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3 3539int netdev_budget __read_mostly = 300;
7acf8a1e 3540unsigned int __read_mostly netdev_budget_usecs = 2000;
3d48b53f
MT
3541int weight_p __read_mostly = 64; /* old backlog weight */
3542int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
3543int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
3544int dev_rx_weight __read_mostly = 64;
3545int dev_tx_weight __read_mostly = 64;
1da177e4 3546
eecfd7c4
ED
3547/* Called with irq disabled */
3548static inline void ____napi_schedule(struct softnet_data *sd,
3549 struct napi_struct *napi)
3550{
3551 list_add_tail(&napi->poll_list, &sd->poll_list);
3552 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3553}
3554
bfb564e7
KK
3555#ifdef CONFIG_RPS
3556
3557/* One global table that all flow-based protocols share. */
6e3f7faf 3558struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 3559EXPORT_SYMBOL(rps_sock_flow_table);
567e4b79
ED
3560u32 rps_cpu_mask __read_mostly;
3561EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 3562
c5905afb 3563struct static_key rps_needed __read_mostly;
3df97ba8 3564EXPORT_SYMBOL(rps_needed);
13bfff25
ED
3565struct static_key rfs_needed __read_mostly;
3566EXPORT_SYMBOL(rfs_needed);
adc9300e 3567
c445477d
BH
3568static struct rps_dev_flow *
3569set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3570 struct rps_dev_flow *rflow, u16 next_cpu)
3571{
a31196b0 3572 if (next_cpu < nr_cpu_ids) {
c445477d
BH
3573#ifdef CONFIG_RFS_ACCEL
3574 struct netdev_rx_queue *rxqueue;
3575 struct rps_dev_flow_table *flow_table;
3576 struct rps_dev_flow *old_rflow;
3577 u32 flow_id;
3578 u16 rxq_index;
3579 int rc;
3580
3581 /* Should we steer this flow to a different hardware queue? */
69a19ee6
BH
3582 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3583 !(dev->features & NETIF_F_NTUPLE))
c445477d
BH
3584 goto out;
3585 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3586 if (rxq_index == skb_get_rx_queue(skb))
3587 goto out;
3588
3589 rxqueue = dev->_rx + rxq_index;
3590 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3591 if (!flow_table)
3592 goto out;
61b905da 3593 flow_id = skb_get_hash(skb) & flow_table->mask;
c445477d
BH
3594 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3595 rxq_index, flow_id);
3596 if (rc < 0)
3597 goto out;
3598 old_rflow = rflow;
3599 rflow = &flow_table->flows[flow_id];
c445477d
BH
3600 rflow->filter = rc;
3601 if (old_rflow->filter == rflow->filter)
3602 old_rflow->filter = RPS_NO_FILTER;
3603 out:
3604#endif
3605 rflow->last_qtail =
09994d1b 3606 per_cpu(softnet_data, next_cpu).input_queue_head;
c445477d
BH
3607 }
3608
09994d1b 3609 rflow->cpu = next_cpu;
c445477d
BH
3610 return rflow;
3611}
3612
bfb564e7
KK
3613/*
3614 * get_rps_cpu is called from netif_receive_skb and returns the target
3615 * CPU from the RPS map of the receiving queue for a given skb.
3616 * rcu_read_lock must be held on entry.
3617 */
3618static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3619 struct rps_dev_flow **rflowp)
3620{
567e4b79
ED
3621 const struct rps_sock_flow_table *sock_flow_table;
3622 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 3623 struct rps_dev_flow_table *flow_table;
567e4b79 3624 struct rps_map *map;
bfb564e7 3625 int cpu = -1;
567e4b79 3626 u32 tcpu;
61b905da 3627 u32 hash;
bfb564e7
KK
3628
3629 if (skb_rx_queue_recorded(skb)) {
3630 u16 index = skb_get_rx_queue(skb);
567e4b79 3631
62fe0b40
BH
3632 if (unlikely(index >= dev->real_num_rx_queues)) {
3633 WARN_ONCE(dev->real_num_rx_queues > 1,
3634 "%s received packet on queue %u, but number "
3635 "of RX queues is %u\n",
3636 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
3637 goto done;
3638 }
567e4b79
ED
3639 rxqueue += index;
3640 }
bfb564e7 3641
567e4b79
ED
3642 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3643
3644 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 3645 map = rcu_dereference(rxqueue->rps_map);
567e4b79 3646 if (!flow_table && !map)
bfb564e7
KK
3647 goto done;
3648
2d47b459 3649 skb_reset_network_header(skb);
61b905da
TH
3650 hash = skb_get_hash(skb);
3651 if (!hash)
bfb564e7
KK
3652 goto done;
3653
fec5e652
TH
3654 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3655 if (flow_table && sock_flow_table) {
fec5e652 3656 struct rps_dev_flow *rflow;
567e4b79
ED
3657 u32 next_cpu;
3658 u32 ident;
3659
3660 /* First check into global flow table if there is a match */
3661 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3662 if ((ident ^ hash) & ~rps_cpu_mask)
3663 goto try_rps;
fec5e652 3664
567e4b79
ED
3665 next_cpu = ident & rps_cpu_mask;
3666
3667 /* OK, now we know there is a match,
3668 * we can look at the local (per receive queue) flow table
3669 */
61b905da 3670 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
3671 tcpu = rflow->cpu;
3672
fec5e652
TH
3673 /*
3674 * If the desired CPU (where last recvmsg was done) is
3675 * different from current CPU (one in the rx-queue flow
3676 * table entry), switch if one of the following holds:
a31196b0 3677 * - Current CPU is unset (>= nr_cpu_ids).
fec5e652
TH
3678 * - Current CPU is offline.
3679 * - The current CPU's queue tail has advanced beyond the
3680 * last packet that was enqueued using this table entry.
3681 * This guarantees that all previous packets for the flow
 3682 * have been dequeued, thus preserving in-order delivery.
3683 */
3684 if (unlikely(tcpu != next_cpu) &&
a31196b0 3685 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 3686 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3687 rflow->last_qtail)) >= 0)) {
3688 tcpu = next_cpu;
c445477d 3689 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3690 }
c445477d 3691
a31196b0 3692 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
3693 *rflowp = rflow;
3694 cpu = tcpu;
3695 goto done;
3696 }
3697 }
3698
567e4b79
ED
3699try_rps:
3700
0a9627f2 3701 if (map) {
8fc54f68 3702 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
3703 if (cpu_online(tcpu)) {
3704 cpu = tcpu;
3705 goto done;
3706 }
3707 }
3708
3709done:
0a9627f2
TH
3710 return cpu;
3711}
3712
c445477d
BH
3713#ifdef CONFIG_RFS_ACCEL
3714
3715/**
3716 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3717 * @dev: Device on which the filter was set
3718 * @rxq_index: RX queue index
3719 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3720 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3721 *
3722 * Drivers that implement ndo_rx_flow_steer() should periodically call
3723 * this function for each installed filter and remove the filters for
3724 * which it returns %true.
3725 */
3726bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3727 u32 flow_id, u16 filter_id)
3728{
3729 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3730 struct rps_dev_flow_table *flow_table;
3731 struct rps_dev_flow *rflow;
3732 bool expire = true;
a31196b0 3733 unsigned int cpu;
c445477d
BH
3734
3735 rcu_read_lock();
3736 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3737 if (flow_table && flow_id <= flow_table->mask) {
3738 rflow = &flow_table->flows[flow_id];
6aa7de05 3739 cpu = READ_ONCE(rflow->cpu);
a31196b0 3740 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
c445477d
BH
3741 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3742 rflow->last_qtail) <
3743 (int)(10 * flow_table->mask)))
3744 expire = false;
3745 }
3746 rcu_read_unlock();
3747 return expire;
3748}
3749EXPORT_SYMBOL(rps_may_expire_flow);
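
/* Illustrative sketch, not part of the original source: how a driver that
 * implements ndo_rx_flow_steer() might periodically scan its filter table,
 * as the kernel-doc above suggests. The example_filter structure and the
 * hardware-removal step are hypothetical placeholders; CONFIG_RFS_ACCEL is
 * assumed, since rps_may_expire_flow() only exists under it.
 */
struct example_filter {
	bool in_use;
	u16 rxq_index;
	u16 filter_id;
	u32 flow_id;
};

static void example_expire_filters(struct net_device *dev,
				   struct example_filter *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, tbl[i].filter_id)) {
			/* driver-specific: remove the filter from hardware */
			tbl[i].in_use = false;
		}
	}
}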
3750
3751#endif /* CONFIG_RFS_ACCEL */
3752
0a9627f2 3753/* Called from hardirq (IPI) context */
e36fa2f7 3754static void rps_trigger_softirq(void *data)
0a9627f2 3755{
e36fa2f7
ED
3756 struct softnet_data *sd = data;
3757
eecfd7c4 3758 ____napi_schedule(sd, &sd->backlog);
dee42870 3759 sd->received_rps++;
0a9627f2 3760}
e36fa2f7 3761
fec5e652 3762#endif /* CONFIG_RPS */
0a9627f2 3763
e36fa2f7
ED
3764/*
 3765 * Check if this softnet_data structure belongs to another CPU.
 3766 * If yes, queue it to our IPI list and return 1.
 3767 * If no, return 0.
3768 */
3769static int rps_ipi_queued(struct softnet_data *sd)
3770{
3771#ifdef CONFIG_RPS
903ceff7 3772 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
e36fa2f7
ED
3773
3774 if (sd != mysd) {
3775 sd->rps_ipi_next = mysd->rps_ipi_list;
3776 mysd->rps_ipi_list = sd;
3777
3778 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3779 return 1;
3780 }
3781#endif /* CONFIG_RPS */
3782 return 0;
3783}
3784
99bbc707
WB
3785#ifdef CONFIG_NET_FLOW_LIMIT
3786int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3787#endif
3788
3789static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3790{
3791#ifdef CONFIG_NET_FLOW_LIMIT
3792 struct sd_flow_limit *fl;
3793 struct softnet_data *sd;
3794 unsigned int old_flow, new_flow;
3795
3796 if (qlen < (netdev_max_backlog >> 1))
3797 return false;
3798
903ceff7 3799 sd = this_cpu_ptr(&softnet_data);
99bbc707
WB
3800
3801 rcu_read_lock();
3802 fl = rcu_dereference(sd->flow_limit);
3803 if (fl) {
3958afa1 3804 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
3805 old_flow = fl->history[fl->history_head];
3806 fl->history[fl->history_head] = new_flow;
3807
3808 fl->history_head++;
3809 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3810
3811 if (likely(fl->buckets[old_flow]))
3812 fl->buckets[old_flow]--;
3813
3814 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3815 fl->count++;
3816 rcu_read_unlock();
3817 return true;
3818 }
3819 }
3820 rcu_read_unlock();
3821#endif
3822 return false;
3823}
3824
0a9627f2
TH
3825/*
3826 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3827 * queue (may be a remote CPU queue).
3828 */
fec5e652
TH
3829static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3830 unsigned int *qtail)
0a9627f2 3831{
e36fa2f7 3832 struct softnet_data *sd;
0a9627f2 3833 unsigned long flags;
99bbc707 3834 unsigned int qlen;
0a9627f2 3835
e36fa2f7 3836 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
3837
3838 local_irq_save(flags);
0a9627f2 3839
e36fa2f7 3840 rps_lock(sd);
e9e4dd32
JA
3841 if (!netif_running(skb->dev))
3842 goto drop;
99bbc707
WB
3843 qlen = skb_queue_len(&sd->input_pkt_queue);
3844 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 3845 if (qlen) {
0a9627f2 3846enqueue:
e36fa2f7 3847 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 3848 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 3849 rps_unlock(sd);
152102c7 3850 local_irq_restore(flags);
0a9627f2
TH
3851 return NET_RX_SUCCESS;
3852 }
3853
ebda37c2
ED
 3854 /* Schedule NAPI for the backlog device.
 3855 * We can use a non-atomic operation since we own the queue lock.
3856 */
3857 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 3858 if (!rps_ipi_queued(sd))
eecfd7c4 3859 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
3860 }
3861 goto enqueue;
3862 }
3863
e9e4dd32 3864drop:
dee42870 3865 sd->dropped++;
e36fa2f7 3866 rps_unlock(sd);
0a9627f2 3867
0a9627f2
TH
3868 local_irq_restore(flags);
3869
caf586e5 3870 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
3871 kfree_skb(skb);
3872 return NET_RX_DROP;
3873}
1da177e4 3874
d4455169
JF
3875static u32 netif_receive_generic_xdp(struct sk_buff *skb,
3876 struct bpf_prog *xdp_prog)
3877{
de8f3a83 3878 u32 metalen, act = XDP_DROP;
d4455169 3879 struct xdp_buff xdp;
d4455169
JF
3880 void *orig_data;
3881 int hlen, off;
3882 u32 mac_len;
3883
3884 /* Reinjected packets coming from act_mirred or similar should
3885 * not get XDP generic processing.
3886 */
3887 if (skb_cloned(skb))
3888 return XDP_PASS;
3889
de8f3a83
DB
3890 /* XDP packets must be linear and must have sufficient headroom
 3891 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that native
 3892 * XDP also provides, thus we need to do it here as well.
3893 */
3894 if (skb_is_nonlinear(skb) ||
3895 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
3896 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
3897 int troom = skb->tail + skb->data_len - skb->end;
3898
 3899 /* In case we have to go down this path and also linearize,
 3900 * let's do the pskb_expand_head() work just once here.
3901 */
3902 if (pskb_expand_head(skb,
3903 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
3904 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
3905 goto do_drop;
3906 if (troom > 0 && __skb_linearize(skb))
3907 goto do_drop;
3908 }
d4455169
JF
3909
3910 /* The XDP program wants to see the packet starting at the MAC
3911 * header.
3912 */
3913 mac_len = skb->data - skb_mac_header(skb);
3914 hlen = skb_headlen(skb) + mac_len;
3915 xdp.data = skb->data - mac_len;
de8f3a83 3916 xdp.data_meta = xdp.data;
d4455169
JF
3917 xdp.data_end = xdp.data + hlen;
3918 xdp.data_hard_start = skb->data - skb_headroom(skb);
3919 orig_data = xdp.data;
3920
3921 act = bpf_prog_run_xdp(xdp_prog, &xdp);
3922
3923 off = xdp.data - orig_data;
3924 if (off > 0)
3925 __skb_pull(skb, off);
3926 else if (off < 0)
3927 __skb_push(skb, -off);
92dd5452 3928 skb->mac_header += off;
d4455169
JF
3929
3930 switch (act) {
6103aa96 3931 case XDP_REDIRECT:
d4455169
JF
3932 case XDP_TX:
3933 __skb_push(skb, mac_len);
de8f3a83 3934 break;
d4455169 3935 case XDP_PASS:
de8f3a83
DB
3936 metalen = xdp.data - xdp.data_meta;
3937 if (metalen)
3938 skb_metadata_set(skb, metalen);
d4455169 3939 break;
d4455169
JF
3940 default:
3941 bpf_warn_invalid_xdp_action(act);
3942 /* fall through */
3943 case XDP_ABORTED:
3944 trace_xdp_exception(skb->dev, xdp_prog, act);
3945 /* fall through */
3946 case XDP_DROP:
3947 do_drop:
3948 kfree_skb(skb);
3949 break;
3950 }
3951
3952 return act;
3953}
3954
3955/* When doing generic XDP we have to bypass the qdisc layer and the
3956 * network taps in order to match in-driver-XDP behavior.
3957 */
7c497478 3958void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
d4455169
JF
3959{
3960 struct net_device *dev = skb->dev;
3961 struct netdev_queue *txq;
3962 bool free_skb = true;
3963 int cpu, rc;
3964
3965 txq = netdev_pick_tx(dev, skb, NULL);
3966 cpu = smp_processor_id();
3967 HARD_TX_LOCK(dev, txq, cpu);
3968 if (!netif_xmit_stopped(txq)) {
3969 rc = netdev_start_xmit(skb, dev, txq, 0);
3970 if (dev_xmit_complete(rc))
3971 free_skb = false;
3972 }
3973 HARD_TX_UNLOCK(dev, txq);
3974 if (free_skb) {
3975 trace_xdp_exception(dev, xdp_prog, XDP_TX);
3976 kfree_skb(skb);
3977 }
3978}
7c497478 3979EXPORT_SYMBOL_GPL(generic_xdp_tx);
d4455169
JF
3980
3981static struct static_key generic_xdp_needed __read_mostly;
3982
7c497478 3983int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
d4455169 3984{
d4455169
JF
3985 if (xdp_prog) {
3986 u32 act = netif_receive_generic_xdp(skb, xdp_prog);
6103aa96 3987 int err;
d4455169
JF
3988
3989 if (act != XDP_PASS) {
6103aa96
JF
3990 switch (act) {
3991 case XDP_REDIRECT:
2facaad6
JDB
3992 err = xdp_do_generic_redirect(skb->dev, skb,
3993 xdp_prog);
6103aa96
JF
3994 if (err)
3995 goto out_redir;
3996 /* fallthru to submit skb */
3997 case XDP_TX:
d4455169 3998 generic_xdp_tx(skb, xdp_prog);
6103aa96
JF
3999 break;
4000 }
d4455169
JF
4001 return XDP_DROP;
4002 }
4003 }
4004 return XDP_PASS;
6103aa96 4005out_redir:
6103aa96
JF
4006 kfree_skb(skb);
4007 return XDP_DROP;
d4455169 4008}
7c497478 4009EXPORT_SYMBOL_GPL(do_xdp_generic);
d4455169 4010
ae78dbfa 4011static int netif_rx_internal(struct sk_buff *skb)
1da177e4 4012{
b0e28f1e 4013 int ret;
1da177e4 4014
588f0330 4015 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 4016
cf66ba58 4017 trace_netif_rx(skb);
d4455169
JF
4018
4019 if (static_key_false(&generic_xdp_needed)) {
bbbe211c
JF
4020 int ret;
4021
4022 preempt_disable();
4023 rcu_read_lock();
4024 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4025 rcu_read_unlock();
4026 preempt_enable();
d4455169 4027
6103aa96
JF
4028 /* Consider XDP consuming the packet a success from
 4029 * the netdev point of view; we do not want to count
4030 * this as an error.
4031 */
d4455169 4032 if (ret != XDP_PASS)
6103aa96 4033 return NET_RX_SUCCESS;
d4455169
JF
4034 }
4035
df334545 4036#ifdef CONFIG_RPS
c5905afb 4037 if (static_key_false(&rps_needed)) {
fec5e652 4038 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
4039 int cpu;
4040
cece1945 4041 preempt_disable();
b0e28f1e 4042 rcu_read_lock();
fec5e652
TH
4043
4044 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
4045 if (cpu < 0)
4046 cpu = smp_processor_id();
fec5e652
TH
4047
4048 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4049
b0e28f1e 4050 rcu_read_unlock();
cece1945 4051 preempt_enable();
adc9300e
ED
4052 } else
4053#endif
fec5e652
TH
4054 {
4055 unsigned int qtail;
f4563a75 4056
fec5e652
TH
4057 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4058 put_cpu();
4059 }
b0e28f1e 4060 return ret;
1da177e4 4061}
ae78dbfa
BH
4062
4063/**
4064 * netif_rx - post buffer to the network code
4065 * @skb: buffer to post
4066 *
4067 * This function receives a packet from a device driver and queues it for
4068 * the upper (protocol) levels to process. It always succeeds. The buffer
4069 * may be dropped during processing for congestion control or by the
4070 * protocol layers.
4071 *
4072 * return values:
4073 * NET_RX_SUCCESS (no congestion)
4074 * NET_RX_DROP (packet was dropped)
4075 *
4076 */
4077
4078int netif_rx(struct sk_buff *skb)
4079{
4080 trace_netif_rx_entry(skb);
4081
4082 return netif_rx_internal(skb);
4083}
d1b19dff 4084EXPORT_SYMBOL(netif_rx);
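
/* Illustrative sketch, not part of the original source: a minimal RX path
 * for a hypothetical non-NAPI driver delivering one frame via netif_rx()
 * from its interrupt handler. The frame buffer and length come from a
 * made-up device; real drivers read from their RX ring or FIFO.
 */
static void example_rx_one(struct net_device *dev, const void *frame, int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (unlikely(!skb)) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_put_data(skb, frame, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */

	netif_rx(skb);		/* from process context, use netif_rx_ni() */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}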
1da177e4
LT
4085
4086int netif_rx_ni(struct sk_buff *skb)
4087{
4088 int err;
4089
ae78dbfa
BH
4090 trace_netif_rx_ni_entry(skb);
4091
1da177e4 4092 preempt_disable();
ae78dbfa 4093 err = netif_rx_internal(skb);
1da177e4
LT
4094 if (local_softirq_pending())
4095 do_softirq();
4096 preempt_enable();
4097
4098 return err;
4099}
1da177e4
LT
4100EXPORT_SYMBOL(netif_rx_ni);
4101
0766f788 4102static __latent_entropy void net_tx_action(struct softirq_action *h)
1da177e4 4103{
903ceff7 4104 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1da177e4
LT
4105
4106 if (sd->completion_queue) {
4107 struct sk_buff *clist;
4108
4109 local_irq_disable();
4110 clist = sd->completion_queue;
4111 sd->completion_queue = NULL;
4112 local_irq_enable();
4113
4114 while (clist) {
4115 struct sk_buff *skb = clist;
f4563a75 4116
1da177e4
LT
4117 clist = clist->next;
4118
63354797 4119 WARN_ON(refcount_read(&skb->users));
e6247027
ED
4120 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4121 trace_consume_skb(skb);
4122 else
4123 trace_kfree_skb(skb, net_tx_action);
15fad714
JDB
4124
4125 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4126 __kfree_skb(skb);
4127 else
4128 __kfree_skb_defer(skb);
1da177e4 4129 }
15fad714
JDB
4130
4131 __kfree_skb_flush();
1da177e4
LT
4132 }
4133
4134 if (sd->output_queue) {
37437bb2 4135 struct Qdisc *head;
1da177e4
LT
4136
4137 local_irq_disable();
4138 head = sd->output_queue;
4139 sd->output_queue = NULL;
a9cbd588 4140 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
4141 local_irq_enable();
4142
4143 while (head) {
37437bb2
DM
4144 struct Qdisc *q = head;
4145 spinlock_t *root_lock;
4146
1da177e4
LT
4147 head = head->next_sched;
4148
5fb66229 4149 root_lock = qdisc_lock(q);
3bcb846c
ED
4150 spin_lock(root_lock);
4151 /* We need to make sure head->next_sched is read
4152 * before clearing __QDISC_STATE_SCHED
4153 */
4154 smp_mb__before_atomic();
4155 clear_bit(__QDISC_STATE_SCHED, &q->state);
4156 qdisc_run(q);
4157 spin_unlock(root_lock);
1da177e4
LT
4158 }
4159 }
4160}
4161
181402a5 4162#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
da678292
MM
4163/* This hook is defined here for ATM LANE */
4164int (*br_fdb_test_addr_hook)(struct net_device *dev,
4165 unsigned char *addr) __read_mostly;
4fb019a0 4166EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 4167#endif
1da177e4 4168
1f211a1b
DB
4169static inline struct sk_buff *
4170sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4171 struct net_device *orig_dev)
f697c3e8 4172{
e7582bab 4173#ifdef CONFIG_NET_CLS_ACT
46209401 4174 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
d2788d34 4175 struct tcf_result cl_res;
24824a09 4176
c9e99fd0
DB
4177 /* If there's at least one ingress present somewhere (so
4178 * we get here via enabled static key), remaining devices
4179 * that are not configured with an ingress qdisc will bail
d2788d34 4180 * out here.
c9e99fd0 4181 */
46209401 4182 if (!miniq)
4577139b 4183 return skb;
46209401 4184
f697c3e8
HX
4185 if (*pt_prev) {
4186 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4187 *pt_prev = NULL;
1da177e4
LT
4188 }
4189
3365495c 4190 qdisc_skb_cb(skb)->pkt_len = skb->len;
8dc07fdb 4191 skb->tc_at_ingress = 1;
46209401 4192 mini_qdisc_bstats_cpu_update(miniq, skb);
c9e99fd0 4193
46209401 4194 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
d2788d34
DB
4195 case TC_ACT_OK:
4196 case TC_ACT_RECLASSIFY:
4197 skb->tc_index = TC_H_MIN(cl_res.classid);
4198 break;
4199 case TC_ACT_SHOT:
46209401 4200 mini_qdisc_qstats_cpu_drop(miniq);
8a3a4c6e
ED
4201 kfree_skb(skb);
4202 return NULL;
d2788d34
DB
4203 case TC_ACT_STOLEN:
4204 case TC_ACT_QUEUED:
e25ea21f 4205 case TC_ACT_TRAP:
8a3a4c6e 4206 consume_skb(skb);
d2788d34 4207 return NULL;
27b29f63
AS
4208 case TC_ACT_REDIRECT:
4209 /* skb_mac_header check was done by cls/act_bpf, so
4210 * we can safely push the L2 header back before
4211 * redirecting to another netdev
4212 */
4213 __skb_push(skb, skb->mac_len);
4214 skb_do_redirect(skb);
4215 return NULL;
d2788d34
DB
4216 default:
4217 break;
f697c3e8 4218 }
e7582bab 4219#endif /* CONFIG_NET_CLS_ACT */
e687ad60
PN
4220 return skb;
4221}
1da177e4 4222
24b27fc4
MB
4223/**
4224 * netdev_is_rx_handler_busy - check if receive handler is registered
4225 * @dev: device to check
4226 *
4227 * Check if a receive handler is already registered for a given device.
 4228 * Return true if there is one.
4229 *
4230 * The caller must hold the rtnl_mutex.
4231 */
4232bool netdev_is_rx_handler_busy(struct net_device *dev)
4233{
4234 ASSERT_RTNL();
4235 return dev && rtnl_dereference(dev->rx_handler);
4236}
4237EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4238
ab95bfe0
JP
4239/**
4240 * netdev_rx_handler_register - register receive handler
4241 * @dev: device to register a handler for
4242 * @rx_handler: receive handler to register
93e2c32b 4243 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0 4244 *
e227867f 4245 * Register a receive handler for a device. This handler will then be
ab95bfe0
JP
4246 * called from __netif_receive_skb. A negative errno code is returned
4247 * on a failure.
4248 *
4249 * The caller must hold the rtnl_mutex.
8a4eb573
JP
4250 *
4251 * For a general description of rx_handler, see enum rx_handler_result.
ab95bfe0
JP
4252 */
4253int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
4254 rx_handler_func_t *rx_handler,
4255 void *rx_handler_data)
ab95bfe0 4256{
1b7cd004 4257 if (netdev_is_rx_handler_busy(dev))
ab95bfe0
JP
4258 return -EBUSY;
4259
00cfec37 4260 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 4261 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
4262 rcu_assign_pointer(dev->rx_handler, rx_handler);
4263
4264 return 0;
4265}
4266EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
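
/* Illustrative sketch, not part of the original source: registering an
 * rx_handler the way bridge, bonding and team do. The example_port
 * structure and its active flag are hypothetical; the handler runs under
 * rcu_read_lock() from __netif_receive_skb_core(), which is why
 * rx_handler_data is fetched with rcu_dereference().
 */
struct example_port {
	bool active;
};

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!port->active) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;	/* we took ownership */
	}
	return RX_HANDLER_PASS;			/* let the stack continue */
}

static int example_attach(struct net_device *dev, struct example_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_handle_frame, port);
	rtnl_unlock();
	return err;
}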
4267
4268/**
4269 * netdev_rx_handler_unregister - unregister receive handler
4270 * @dev: device to unregister a handler from
4271 *
166ec369 4272 * Unregister a receive handler from a device.
ab95bfe0
JP
4273 *
4274 * The caller must hold the rtnl_mutex.
4275 */
4276void netdev_rx_handler_unregister(struct net_device *dev)
4277{
4278
4279 ASSERT_RTNL();
a9b3cd7f 4280 RCU_INIT_POINTER(dev->rx_handler, NULL);
00cfec37
ED
4281 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
4282 * section has a guarantee to see a non NULL rx_handler_data
4283 * as well.
4284 */
4285 synchronize_net();
a9b3cd7f 4286 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
ab95bfe0
JP
4287}
4288EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4289
b4b9e355
MG
4290/*
4291 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4292 * the special handling of PFMEMALLOC skbs.
4293 */
4294static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4295{
4296 switch (skb->protocol) {
2b8837ae
JP
4297 case htons(ETH_P_ARP):
4298 case htons(ETH_P_IP):
4299 case htons(ETH_P_IPV6):
4300 case htons(ETH_P_8021Q):
4301 case htons(ETH_P_8021AD):
b4b9e355
MG
4302 return true;
4303 default:
4304 return false;
4305 }
4306}
4307
e687ad60
PN
4308static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4309 int *ret, struct net_device *orig_dev)
4310{
e7582bab 4311#ifdef CONFIG_NETFILTER_INGRESS
e687ad60 4312 if (nf_hook_ingress_active(skb)) {
2c1e2703
AC
4313 int ingress_retval;
4314
e687ad60
PN
4315 if (*pt_prev) {
4316 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4317 *pt_prev = NULL;
4318 }
4319
2c1e2703
AC
4320 rcu_read_lock();
4321 ingress_retval = nf_hook_ingress(skb);
4322 rcu_read_unlock();
4323 return ingress_retval;
e687ad60 4324 }
e7582bab 4325#endif /* CONFIG_NETFILTER_INGRESS */
e687ad60
PN
4326 return 0;
4327}
e687ad60 4328
9754e293 4329static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
1da177e4
LT
4330{
4331 struct packet_type *ptype, *pt_prev;
ab95bfe0 4332 rx_handler_func_t *rx_handler;
f2ccd8fa 4333 struct net_device *orig_dev;
8a4eb573 4334 bool deliver_exact = false;
1da177e4 4335 int ret = NET_RX_DROP;
252e3346 4336 __be16 type;
1da177e4 4337
588f0330 4338 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 4339
cf66ba58 4340 trace_netif_receive_skb(skb);
9b22ea56 4341
cc9bd5ce 4342 orig_dev = skb->dev;
8f903c70 4343
c1d2bbe1 4344 skb_reset_network_header(skb);
fda55eca
ED
4345 if (!skb_transport_header_was_set(skb))
4346 skb_reset_transport_header(skb);
0b5c9db1 4347 skb_reset_mac_len(skb);
1da177e4
LT
4348
4349 pt_prev = NULL;
4350
63d8ea7f 4351another_round:
b6858177 4352 skb->skb_iif = skb->dev->ifindex;
63d8ea7f
DM
4353
4354 __this_cpu_inc(softnet_data.processed);
4355
8ad227ff
PM
4356 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4357 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
0d5501c1 4358 skb = skb_vlan_untag(skb);
bcc6d479 4359 if (unlikely(!skb))
2c17d27c 4360 goto out;
bcc6d479
JP
4361 }
4362
e7246e12
WB
4363 if (skb_skip_tc_classify(skb))
4364 goto skip_classify;
1da177e4 4365
9754e293 4366 if (pfmemalloc)
b4b9e355
MG
4367 goto skip_taps;
4368
1da177e4 4369 list_for_each_entry_rcu(ptype, &ptype_all, list) {
7866a621
SN
4370 if (pt_prev)
4371 ret = deliver_skb(skb, pt_prev, orig_dev);
4372 pt_prev = ptype;
4373 }
4374
4375 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4376 if (pt_prev)
4377 ret = deliver_skb(skb, pt_prev, orig_dev);
4378 pt_prev = ptype;
1da177e4
LT
4379 }
4380
b4b9e355 4381skip_taps:
1cf51900 4382#ifdef CONFIG_NET_INGRESS
4577139b 4383 if (static_key_false(&ingress_needed)) {
1f211a1b 4384 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4577139b 4385 if (!skb)
2c17d27c 4386 goto out;
e687ad60
PN
4387
4388 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
2c17d27c 4389 goto out;
4577139b 4390 }
1cf51900 4391#endif
a5135bcf 4392 skb_reset_tc(skb);
e7246e12 4393skip_classify:
9754e293 4394 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
4395 goto drop;
4396
df8a39de 4397 if (skb_vlan_tag_present(skb)) {
2425717b
JF
4398 if (pt_prev) {
4399 ret = deliver_skb(skb, pt_prev, orig_dev);
4400 pt_prev = NULL;
4401 }
48cc32d3 4402 if (vlan_do_receive(&skb))
2425717b
JF
4403 goto another_round;
4404 else if (unlikely(!skb))
2c17d27c 4405 goto out;
2425717b
JF
4406 }
4407
48cc32d3 4408 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
4409 if (rx_handler) {
4410 if (pt_prev) {
4411 ret = deliver_skb(skb, pt_prev, orig_dev);
4412 pt_prev = NULL;
4413 }
8a4eb573
JP
4414 switch (rx_handler(&skb)) {
4415 case RX_HANDLER_CONSUMED:
3bc1b1ad 4416 ret = NET_RX_SUCCESS;
2c17d27c 4417 goto out;
8a4eb573 4418 case RX_HANDLER_ANOTHER:
63d8ea7f 4419 goto another_round;
8a4eb573
JP
4420 case RX_HANDLER_EXACT:
4421 deliver_exact = true;
4422 case RX_HANDLER_PASS:
4423 break;
4424 default:
4425 BUG();
4426 }
ab95bfe0 4427 }
1da177e4 4428
df8a39de
JP
4429 if (unlikely(skb_vlan_tag_present(skb))) {
4430 if (skb_vlan_tag_get_id(skb))
d4b812de
ED
4431 skb->pkt_type = PACKET_OTHERHOST;
4432 /* Note: we might in the future use prio bits
4433 * and set skb->priority like in vlan_do_receive()
4434 * For the time being, just ignore Priority Code Point
4435 */
4436 skb->vlan_tci = 0;
4437 }
48cc32d3 4438
7866a621
SN
4439 type = skb->protocol;
4440
63d8ea7f 4441 /* deliver only exact match when indicated */
7866a621
SN
4442 if (likely(!deliver_exact)) {
4443 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4444 &ptype_base[ntohs(type) &
4445 PTYPE_HASH_MASK]);
4446 }
1f3c8804 4447
7866a621
SN
4448 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4449 &orig_dev->ptype_specific);
4450
4451 if (unlikely(skb->dev != orig_dev)) {
4452 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4453 &skb->dev->ptype_specific);
1da177e4
LT
4454 }
4455
4456 if (pt_prev) {
1f8b977a 4457 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
0e698bf6 4458 goto drop;
1080e512
MT
4459 else
4460 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 4461 } else {
b4b9e355 4462drop:
6e7333d3
JW
4463 if (!deliver_exact)
4464 atomic_long_inc(&skb->dev->rx_dropped);
4465 else
4466 atomic_long_inc(&skb->dev->rx_nohandler);
1da177e4
LT
4467 kfree_skb(skb);
 4468 /* Jamal, now you will not be able to escape explaining
 4469 * to me how you were going to use this. :-)
4470 */
4471 ret = NET_RX_DROP;
4472 }
4473
2c17d27c 4474out:
9754e293
DM
4475 return ret;
4476}
4477
1c601d82
JDB
4478/**
4479 * netif_receive_skb_core - special purpose version of netif_receive_skb
4480 * @skb: buffer to process
4481 *
4482 * More direct receive version of netif_receive_skb(). It should
4483 * only be used by callers that have a need to skip RPS and Generic XDP.
4484 * Caller must also take care of handling if (page_is_)pfmemalloc.
4485 *
4486 * This function may only be called from softirq context and interrupts
4487 * should be enabled.
4488 *
4489 * Return values (usually ignored):
4490 * NET_RX_SUCCESS: no congestion
4491 * NET_RX_DROP: packet was dropped
4492 */
4493int netif_receive_skb_core(struct sk_buff *skb)
4494{
4495 int ret;
4496
4497 rcu_read_lock();
4498 ret = __netif_receive_skb_core(skb, false);
4499 rcu_read_unlock();
4500
4501 return ret;
4502}
4503EXPORT_SYMBOL(netif_receive_skb_core);
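
/* Illustrative sketch, not part of the original source: a caller that has
 * already done its own XDP handling and pfmemalloc checks injecting an skb
 * directly, as the kernel-doc above describes. Softirq context with
 * interrupts enabled is assumed.
 */
static void example_direct_inject(struct sk_buff *skb, struct net_device *dev)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb_core(skb);
}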
4504
9754e293
DM
4505static int __netif_receive_skb(struct sk_buff *skb)
4506{
4507 int ret;
4508
4509 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
f1083048 4510 unsigned int noreclaim_flag;
9754e293
DM
4511
4512 /*
4513 * PFMEMALLOC skbs are special, they should
4514 * - be delivered to SOCK_MEMALLOC sockets only
4515 * - stay away from userspace
4516 * - have bounded memory usage
4517 *
4518 * Use PF_MEMALLOC as this saves us from propagating the allocation
4519 * context down to all allocation sites.
4520 */
f1083048 4521 noreclaim_flag = memalloc_noreclaim_save();
9754e293 4522 ret = __netif_receive_skb_core(skb, true);
f1083048 4523 memalloc_noreclaim_restore(noreclaim_flag);
9754e293
DM
4524 } else
4525 ret = __netif_receive_skb_core(skb, false);
4526
1da177e4
LT
4527 return ret;
4528}
0a9627f2 4529
f4e63525 4530static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
b5cdae32 4531{
58038695 4532 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
b5cdae32
DM
4533 struct bpf_prog *new = xdp->prog;
4534 int ret = 0;
4535
4536 switch (xdp->command) {
58038695 4537 case XDP_SETUP_PROG:
b5cdae32
DM
4538 rcu_assign_pointer(dev->xdp_prog, new);
4539 if (old)
4540 bpf_prog_put(old);
4541
4542 if (old && !new) {
4543 static_key_slow_dec(&generic_xdp_needed);
4544 } else if (new && !old) {
4545 static_key_slow_inc(&generic_xdp_needed);
4546 dev_disable_lro(dev);
4547 }
4548 break;
b5cdae32
DM
4549
4550 case XDP_QUERY_PROG:
58038695
MKL
4551 xdp->prog_attached = !!old;
4552 xdp->prog_id = old ? old->aux->id : 0;
b5cdae32
DM
4553 break;
4554
4555 default:
4556 ret = -EINVAL;
4557 break;
4558 }
4559
4560 return ret;
4561}
4562
ae78dbfa 4563static int netif_receive_skb_internal(struct sk_buff *skb)
0a9627f2 4564{
2c17d27c
JA
4565 int ret;
4566
588f0330 4567 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 4568
c1f19b51
RC
4569 if (skb_defer_rx_timestamp(skb))
4570 return NET_RX_SUCCESS;
4571
b5cdae32 4572 if (static_key_false(&generic_xdp_needed)) {
bbbe211c 4573 int ret;
b5cdae32 4574
bbbe211c
JF
4575 preempt_disable();
4576 rcu_read_lock();
4577 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4578 rcu_read_unlock();
4579 preempt_enable();
4580
4581 if (ret != XDP_PASS)
d4455169 4582 return NET_RX_DROP;
b5cdae32
DM
4583 }
4584
bbbe211c 4585 rcu_read_lock();
df334545 4586#ifdef CONFIG_RPS
c5905afb 4587 if (static_key_false(&rps_needed)) {
3b098e2d 4588 struct rps_dev_flow voidflow, *rflow = &voidflow;
2c17d27c 4589 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 4590
3b098e2d
ED
4591 if (cpu >= 0) {
4592 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4593 rcu_read_unlock();
adc9300e 4594 return ret;
3b098e2d 4595 }
fec5e652 4596 }
1e94d72f 4597#endif
2c17d27c
JA
4598 ret = __netif_receive_skb(skb);
4599 rcu_read_unlock();
4600 return ret;
0a9627f2 4601}
ae78dbfa
BH
4602
4603/**
4604 * netif_receive_skb - process receive buffer from network
4605 * @skb: buffer to process
4606 *
4607 * netif_receive_skb() is the main receive data processing function.
4608 * It always succeeds. The buffer may be dropped during processing
4609 * for congestion control or by the protocol layers.
4610 *
4611 * This function may only be called from softirq context and interrupts
4612 * should be enabled.
4613 *
4614 * Return values (usually ignored):
4615 * NET_RX_SUCCESS: no congestion
4616 * NET_RX_DROP: packet was dropped
4617 */
04eb4489 4618int netif_receive_skb(struct sk_buff *skb)
ae78dbfa
BH
4619{
4620 trace_netif_receive_skb_entry(skb);
4621
4622 return netif_receive_skb_internal(skb);
4623}
04eb4489 4624EXPORT_SYMBOL(netif_receive_skb);
1da177e4 4625
41852497 4626DEFINE_PER_CPU(struct work_struct, flush_works);
145dd5f9
PA
4627
4628/* Network device is going away, flush any packets still pending */
4629static void flush_backlog(struct work_struct *work)
6e583ce5 4630{
6e583ce5 4631 struct sk_buff *skb, *tmp;
145dd5f9
PA
4632 struct softnet_data *sd;
4633
4634 local_bh_disable();
4635 sd = this_cpu_ptr(&softnet_data);
6e583ce5 4636
145dd5f9 4637 local_irq_disable();
e36fa2f7 4638 rps_lock(sd);
6e7676c1 4639 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
41852497 4640 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
e36fa2f7 4641 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 4642 kfree_skb(skb);
76cc8b13 4643 input_queue_head_incr(sd);
6e583ce5 4644 }
6e7676c1 4645 }
e36fa2f7 4646 rps_unlock(sd);
145dd5f9 4647 local_irq_enable();
6e7676c1
CG
4648
4649 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
41852497 4650 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
6e7676c1
CG
4651 __skb_unlink(skb, &sd->process_queue);
4652 kfree_skb(skb);
76cc8b13 4653 input_queue_head_incr(sd);
6e7676c1
CG
4654 }
4655 }
145dd5f9
PA
4656 local_bh_enable();
4657}
4658
41852497 4659static void flush_all_backlogs(void)
145dd5f9
PA
4660{
4661 unsigned int cpu;
4662
4663 get_online_cpus();
4664
41852497
ED
4665 for_each_online_cpu(cpu)
4666 queue_work_on(cpu, system_highpri_wq,
4667 per_cpu_ptr(&flush_works, cpu));
145dd5f9
PA
4668
4669 for_each_online_cpu(cpu)
41852497 4670 flush_work(per_cpu_ptr(&flush_works, cpu));
145dd5f9
PA
4671
4672 put_online_cpus();
6e583ce5
SH
4673}
4674
d565b0a1
HX
4675static int napi_gro_complete(struct sk_buff *skb)
4676{
22061d80 4677 struct packet_offload *ptype;
d565b0a1 4678 __be16 type = skb->protocol;
22061d80 4679 struct list_head *head = &offload_base;
d565b0a1
HX
4680 int err = -ENOENT;
4681
c3c7c254
ED
4682 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4683
fc59f9a3
HX
4684 if (NAPI_GRO_CB(skb)->count == 1) {
4685 skb_shinfo(skb)->gso_size = 0;
d565b0a1 4686 goto out;
fc59f9a3 4687 }
d565b0a1
HX
4688
4689 rcu_read_lock();
4690 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4691 if (ptype->type != type || !ptype->callbacks.gro_complete)
d565b0a1
HX
4692 continue;
4693
299603e8 4694 err = ptype->callbacks.gro_complete(skb, 0);
d565b0a1
HX
4695 break;
4696 }
4697 rcu_read_unlock();
4698
4699 if (err) {
4700 WARN_ON(&ptype->list == head);
4701 kfree_skb(skb);
4702 return NET_RX_SUCCESS;
4703 }
4704
4705out:
ae78dbfa 4706 return netif_receive_skb_internal(skb);
d565b0a1
HX
4707}
4708
2e71a6f8
ED
 4709 /* napi->gro_list contains packets ordered by age, with the
 4710 * youngest packets at the head of the list.
4711 * Complete skbs in reverse order to reduce latencies.
4712 */
4713void napi_gro_flush(struct napi_struct *napi, bool flush_old)
d565b0a1 4714{
2e71a6f8 4715 struct sk_buff *skb, *prev = NULL;
d565b0a1 4716
2e71a6f8
ED
4717 /* scan list and build reverse chain */
4718 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4719 skb->prev = prev;
4720 prev = skb;
4721 }
4722
4723 for (skb = prev; skb; skb = prev) {
d565b0a1 4724 skb->next = NULL;
2e71a6f8
ED
4725
4726 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4727 return;
4728
4729 prev = skb->prev;
d565b0a1 4730 napi_gro_complete(skb);
2e71a6f8 4731 napi->gro_count--;
d565b0a1
HX
4732 }
4733
4734 napi->gro_list = NULL;
4735}
86cac58b 4736EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 4737
89c5fa33
ED
4738static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4739{
4740 struct sk_buff *p;
4741 unsigned int maclen = skb->dev->hard_header_len;
0b4cec8c 4742 u32 hash = skb_get_hash_raw(skb);
89c5fa33
ED
4743
4744 for (p = napi->gro_list; p; p = p->next) {
4745 unsigned long diffs;
4746
0b4cec8c
TH
4747 NAPI_GRO_CB(p)->flush = 0;
4748
4749 if (hash != skb_get_hash_raw(p)) {
4750 NAPI_GRO_CB(p)->same_flow = 0;
4751 continue;
4752 }
4753
89c5fa33
ED
4754 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4755 diffs |= p->vlan_tci ^ skb->vlan_tci;
ce87fc6c 4756 diffs |= skb_metadata_dst_cmp(p, skb);
de8f3a83 4757 diffs |= skb_metadata_differs(p, skb);
89c5fa33
ED
4758 if (maclen == ETH_HLEN)
4759 diffs |= compare_ether_header(skb_mac_header(p),
a50e233c 4760 skb_mac_header(skb));
89c5fa33
ED
4761 else if (!diffs)
4762 diffs = memcmp(skb_mac_header(p),
a50e233c 4763 skb_mac_header(skb),
89c5fa33
ED
4764 maclen);
4765 NAPI_GRO_CB(p)->same_flow = !diffs;
89c5fa33
ED
4766 }
4767}
4768
299603e8
JC
4769static void skb_gro_reset_offset(struct sk_buff *skb)
4770{
4771 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4772 const skb_frag_t *frag0 = &pinfo->frags[0];
4773
4774 NAPI_GRO_CB(skb)->data_offset = 0;
4775 NAPI_GRO_CB(skb)->frag0 = NULL;
4776 NAPI_GRO_CB(skb)->frag0_len = 0;
4777
4778 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4779 pinfo->nr_frags &&
4780 !PageHighMem(skb_frag_page(frag0))) {
4781 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
7cfd5fd5
ED
4782 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4783 skb_frag_size(frag0),
4784 skb->end - skb->tail);
89c5fa33
ED
4785 }
4786}
4787
a50e233c
ED
4788static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4789{
4790 struct skb_shared_info *pinfo = skb_shinfo(skb);
4791
4792 BUG_ON(skb->end - skb->tail < grow);
4793
4794 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4795
4796 skb->data_len -= grow;
4797 skb->tail += grow;
4798
4799 pinfo->frags[0].page_offset += grow;
4800 skb_frag_size_sub(&pinfo->frags[0], grow);
4801
4802 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4803 skb_frag_unref(skb, 0);
4804 memmove(pinfo->frags, pinfo->frags + 1,
4805 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4806 }
4807}
4808
bb728820 4809static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
4810{
4811 struct sk_buff **pp = NULL;
22061d80 4812 struct packet_offload *ptype;
d565b0a1 4813 __be16 type = skb->protocol;
22061d80 4814 struct list_head *head = &offload_base;
0da2afd5 4815 int same_flow;
5b252f0c 4816 enum gro_result ret;
a50e233c 4817 int grow;
d565b0a1 4818
b5cdae32 4819 if (netif_elide_gro(skb->dev))
d565b0a1
HX
4820 goto normal;
4821
89c5fa33
ED
4822 gro_list_prepare(napi, skb);
4823
d565b0a1
HX
4824 rcu_read_lock();
4825 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4826 if (ptype->type != type || !ptype->callbacks.gro_receive)
d565b0a1
HX
4827 continue;
4828
86911732 4829 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 4830 skb_reset_mac_len(skb);
d565b0a1 4831 NAPI_GRO_CB(skb)->same_flow = 0;
d61d072e 4832 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5d38a079 4833 NAPI_GRO_CB(skb)->free = 0;
fac8e0f5 4834 NAPI_GRO_CB(skb)->encap_mark = 0;
fcd91dd4 4835 NAPI_GRO_CB(skb)->recursion_counter = 0;
a0ca153f 4836 NAPI_GRO_CB(skb)->is_fou = 0;
1530545e 4837 NAPI_GRO_CB(skb)->is_atomic = 1;
15e2396d 4838 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
d565b0a1 4839
662880f4
TH
4840 /* Setup for GRO checksum validation */
4841 switch (skb->ip_summed) {
4842 case CHECKSUM_COMPLETE:
4843 NAPI_GRO_CB(skb)->csum = skb->csum;
4844 NAPI_GRO_CB(skb)->csum_valid = 1;
4845 NAPI_GRO_CB(skb)->csum_cnt = 0;
4846 break;
4847 case CHECKSUM_UNNECESSARY:
4848 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4849 NAPI_GRO_CB(skb)->csum_valid = 0;
4850 break;
4851 default:
4852 NAPI_GRO_CB(skb)->csum_cnt = 0;
4853 NAPI_GRO_CB(skb)->csum_valid = 0;
4854 }
d565b0a1 4855
f191a1d1 4856 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
d565b0a1
HX
4857 break;
4858 }
4859 rcu_read_unlock();
4860
4861 if (&ptype->list == head)
4862 goto normal;
4863
25393d3f
SK
4864 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
4865 ret = GRO_CONSUMED;
4866 goto ok;
4867 }
4868
0da2afd5 4869 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 4870 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 4871
d565b0a1
HX
4872 if (pp) {
4873 struct sk_buff *nskb = *pp;
4874
4875 *pp = nskb->next;
4876 nskb->next = NULL;
4877 napi_gro_complete(nskb);
4ae5544f 4878 napi->gro_count--;
d565b0a1
HX
4879 }
4880
0da2afd5 4881 if (same_flow)
d565b0a1
HX
4882 goto ok;
4883
600adc18 4884 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 4885 goto normal;
d565b0a1 4886
600adc18
ED
4887 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4888 struct sk_buff *nskb = napi->gro_list;
4889
4890 /* locate the end of the list to select the 'oldest' flow */
4891 while (nskb->next) {
4892 pp = &nskb->next;
4893 nskb = *pp;
4894 }
4895 *pp = NULL;
4896 nskb->next = NULL;
4897 napi_gro_complete(nskb);
4898 } else {
4899 napi->gro_count++;
4900 }
d565b0a1 4901 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 4902 NAPI_GRO_CB(skb)->age = jiffies;
29e98242 4903 NAPI_GRO_CB(skb)->last = skb;
86911732 4904 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
4905 skb->next = napi->gro_list;
4906 napi->gro_list = skb;
5d0d9be8 4907 ret = GRO_HELD;
d565b0a1 4908
ad0f9904 4909pull:
a50e233c
ED
4910 grow = skb_gro_offset(skb) - skb_headlen(skb);
4911 if (grow > 0)
4912 gro_pull_from_frag0(skb, grow);
d565b0a1 4913ok:
5d0d9be8 4914 return ret;
d565b0a1
HX
4915
4916normal:
ad0f9904
HX
4917 ret = GRO_NORMAL;
4918 goto pull;
5d38a079 4919}
96e93eab 4920
bf5a755f
JC
4921struct packet_offload *gro_find_receive_by_type(__be16 type)
4922{
4923 struct list_head *offload_head = &offload_base;
4924 struct packet_offload *ptype;
4925
4926 list_for_each_entry_rcu(ptype, offload_head, list) {
4927 if (ptype->type != type || !ptype->callbacks.gro_receive)
4928 continue;
4929 return ptype;
4930 }
4931 return NULL;
4932}
e27a2f83 4933EXPORT_SYMBOL(gro_find_receive_by_type);
bf5a755f
JC
4934
4935struct packet_offload *gro_find_complete_by_type(__be16 type)
4936{
4937 struct list_head *offload_head = &offload_base;
4938 struct packet_offload *ptype;
4939
4940 list_for_each_entry_rcu(ptype, offload_head, list) {
4941 if (ptype->type != type || !ptype->callbacks.gro_complete)
4942 continue;
4943 return ptype;
4944 }
4945 return NULL;
4946}
e27a2f83 4947EXPORT_SYMBOL(gro_find_complete_by_type);
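
/* Illustrative sketch, not part of the original source: the shape of a
 * packet_offload that the lookups above would find. Both callbacks are
 * stubs, and 0x88b5 (an IEEE 802 local experimental EtherType) stands in
 * for a real protocol.
 */
static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->flush = 1;	/* sketch: never aggregate */
	return NULL;
}

static int example_gro_complete(struct sk_buff *skb, int nhoff)
{
	return 0;	/* fix up headers of the merged packet here */
}

static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(0x88b5),
	.callbacks = {
		.gro_receive	= example_gro_receive,
		.gro_complete	= example_gro_complete,
	},
};

/* At init time, dev_add_offload(&example_offload) would make the type
 * discoverable by the lookup helpers above, as the inet and ipv6 offload
 * code does for theirs.
 */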
5d38a079 4948
e44699d2
MK
4949static void napi_skb_free_stolen_head(struct sk_buff *skb)
4950{
4951 skb_dst_drop(skb);
4952 secpath_reset(skb);
4953 kmem_cache_free(skbuff_head_cache, skb);
4954}
4955
bb728820 4956static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 4957{
5d0d9be8
HX
4958 switch (ret) {
4959 case GRO_NORMAL:
ae78dbfa 4960 if (netif_receive_skb_internal(skb))
c7c4b3b6
BH
4961 ret = GRO_DROP;
4962 break;
5d38a079 4963
5d0d9be8 4964 case GRO_DROP:
5d38a079
HX
4965 kfree_skb(skb);
4966 break;
5b252f0c 4967
daa86548 4968 case GRO_MERGED_FREE:
e44699d2
MK
4969 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4970 napi_skb_free_stolen_head(skb);
4971 else
d7e8883c 4972 __kfree_skb(skb);
daa86548
ED
4973 break;
4974
5b252f0c
BH
4975 case GRO_HELD:
4976 case GRO_MERGED:
25393d3f 4977 case GRO_CONSUMED:
5b252f0c 4978 break;
5d38a079
HX
4979 }
4980
c7c4b3b6 4981 return ret;
5d0d9be8 4982}
5d0d9be8 4983
c7c4b3b6 4984gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 4985{
93f93a44 4986 skb_mark_napi_id(skb, napi);
ae78dbfa 4987 trace_napi_gro_receive_entry(skb);
86911732 4988
a50e233c
ED
4989 skb_gro_reset_offset(skb);
4990
89c5fa33 4991 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
d565b0a1
HX
4992}
4993EXPORT_SYMBOL(napi_gro_receive);
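
/* Illustrative sketch, not part of the original source: the usual way a
 * NAPI driver's poll routine feeds frames through napi_gro_receive() and
 * completes the poll. example_priv, example_rx_next() and
 * example_enable_rx_irq() are hypothetical driver pieces.
 */
struct example_priv {
	struct napi_struct napi;
	/* RX ring state would live here */
};

static struct sk_buff *example_rx_next(struct example_priv *priv);	/* hypothetical */
static void example_enable_rx_irq(struct example_priv *priv);		/* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = example_rx_next(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);
		work++;
	}

	/* Re-enable device interrupts only if the budget was not exhausted
	 * and the poll really completed.
	 */
	if (work < budget && napi_complete_done(napi, work))
		example_enable_rx_irq(priv);

	return work;
}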
4994
d0c2b0d2 4995static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 4996{
93a35f59
ED
4997 if (unlikely(skb->pfmemalloc)) {
4998 consume_skb(skb);
4999 return;
5000 }
96e93eab 5001 __skb_pull(skb, skb_headlen(skb));
2a2a459e
ED
5002 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
5003 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3701e513 5004 skb->vlan_tci = 0;
66c46d74 5005 skb->dev = napi->dev;
6d152e23 5006 skb->skb_iif = 0;
c3caf119
JC
5007 skb->encapsulation = 0;
5008 skb_shinfo(skb)->gso_type = 0;
e33d0ba8 5009 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
f991bb9d 5010 secpath_reset(skb);
96e93eab
HX
5011
5012 napi->skb = skb;
5013}
96e93eab 5014
76620aaf 5015struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 5016{
5d38a079 5017 struct sk_buff *skb = napi->skb;
5d38a079
HX
5018
5019 if (!skb) {
fd11a83d 5020 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
e2f9dc3b
ED
5021 if (skb) {
5022 napi->skb = skb;
5023 skb_mark_napi_id(skb, napi);
5024 }
80595d59 5025 }
96e93eab
HX
5026 return skb;
5027}
76620aaf 5028EXPORT_SYMBOL(napi_get_frags);
96e93eab 5029
a50e233c
ED
5030static gro_result_t napi_frags_finish(struct napi_struct *napi,
5031 struct sk_buff *skb,
5032 gro_result_t ret)
96e93eab 5033{
5d0d9be8
HX
5034 switch (ret) {
5035 case GRO_NORMAL:
a50e233c
ED
5036 case GRO_HELD:
5037 __skb_push(skb, ETH_HLEN);
5038 skb->protocol = eth_type_trans(skb, skb->dev);
5039 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
c7c4b3b6 5040 ret = GRO_DROP;
86911732 5041 break;
5d38a079 5042
5d0d9be8 5043 case GRO_DROP:
5d0d9be8
HX
5044 napi_reuse_skb(napi, skb);
5045 break;
5b252f0c 5046
e44699d2
MK
5047 case GRO_MERGED_FREE:
5048 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5049 napi_skb_free_stolen_head(skb);
5050 else
5051 napi_reuse_skb(napi, skb);
5052 break;
5053
5b252f0c 5054 case GRO_MERGED:
25393d3f 5055 case GRO_CONSUMED:
5b252f0c 5056 break;
5d0d9be8 5057 }
5d38a079 5058
c7c4b3b6 5059 return ret;
5d38a079 5060}
5d0d9be8 5061
a50e233c
ED
5062/* Upper GRO stack assumes network header starts at gro_offset=0
5063 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 5064 * We copy the Ethernet header into skb->data to have a common layout.
5065 */
4adb9c4a 5066static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
76620aaf
HX
5067{
5068 struct sk_buff *skb = napi->skb;
a50e233c
ED
5069 const struct ethhdr *eth;
5070 unsigned int hlen = sizeof(*eth);
76620aaf
HX
5071
5072 napi->skb = NULL;
5073
a50e233c
ED
5074 skb_reset_mac_header(skb);
5075 skb_gro_reset_offset(skb);
5076
5077 eth = skb_gro_header_fast(skb, 0);
5078 if (unlikely(skb_gro_header_hard(skb, hlen))) {
5079 eth = skb_gro_header_slow(skb, hlen, 0);
5080 if (unlikely(!eth)) {
4da46ceb
AC
5081 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5082 __func__, napi->dev->name);
a50e233c
ED
5083 napi_reuse_skb(napi, skb);
5084 return NULL;
5085 }
5086 } else {
5087 gro_pull_from_frag0(skb, hlen);
5088 NAPI_GRO_CB(skb)->frag0 += hlen;
5089 NAPI_GRO_CB(skb)->frag0_len -= hlen;
76620aaf 5090 }
a50e233c
ED
5091 __skb_pull(skb, hlen);
5092
5093 /*
5094 * This works because the only protocols we care about don't require
5095 * special handling.
5096 * We'll fix it up properly in napi_frags_finish()
5097 */
5098 skb->protocol = eth->h_proto;
76620aaf 5099
76620aaf
HX
5100 return skb;
5101}
76620aaf 5102
c7c4b3b6 5103gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 5104{
76620aaf 5105 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
5106
5107 if (!skb)
c7c4b3b6 5108 return GRO_DROP;
5d0d9be8 5109
ae78dbfa
BH
5110 trace_napi_gro_frags_entry(skb);
5111
89c5fa33 5112 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5d0d9be8 5113}
5d38a079
HX
5114EXPORT_SYMBOL(napi_gro_frags);
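
/* Illustrative sketch, not part of the original source: the frag-based
 * receive path for drivers that DMA into pages. The page and offsets are
 * hypothetical, and passing len as truesize is a simplification; the key
 * point is that the driver never builds skb->data itself, since
 * napi_frags_skb() pulls the Ethernet header out of frag0.
 */
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb)) {
		put_page(page);		/* drop the frame */
		return;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len, len);
	napi_gro_frags(napi);		/* consumes napi->skb */
}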
5115
573e8fca
TH
5116/* Compute the checksum from gro_offset and return the folded value
5117 * after adding in any pseudo checksum.
5118 */
5119__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5120{
5121 __wsum wsum;
5122 __sum16 sum;
5123
5124 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5125
5126 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5127 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5128 if (likely(!sum)) {
5129 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5130 !skb->csum_complete_sw)
5131 netdev_rx_csum_fault(skb->dev);
5132 }
5133
5134 NAPI_GRO_CB(skb)->csum = wsum;
5135 NAPI_GRO_CB(skb)->csum_valid = 1;
5136
5137 return sum;
5138}
5139EXPORT_SYMBOL(__skb_gro_checksum_complete);
5140
773fc8f6 5141static void net_rps_send_ipi(struct softnet_data *remsd)
5142{
5143#ifdef CONFIG_RPS
5144 while (remsd) {
5145 struct softnet_data *next = remsd->rps_ipi_next;
5146
5147 if (cpu_online(remsd->cpu))
5148 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5149 remsd = next;
5150 }
5151#endif
5152}
5153
e326bed2 5154/*
855abcf0 5155 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
e326bed2
ED
5156 * Note: called with local irq disabled, but exits with local irq enabled.
5157 */
5158static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5159{
5160#ifdef CONFIG_RPS
5161 struct softnet_data *remsd = sd->rps_ipi_list;
5162
5163 if (remsd) {
5164 sd->rps_ipi_list = NULL;
5165
5166 local_irq_enable();
5167
5168 /* Send pending IPI's to kick RPS processing on remote cpus. */
773fc8f6 5169 net_rps_send_ipi(remsd);
e326bed2
ED
5170 } else
5171#endif
5172 local_irq_enable();
5173}
5174
d75b1ade
ED
5175static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5176{
5177#ifdef CONFIG_RPS
5178 return sd->rps_ipi_list != NULL;
5179#else
5180 return false;
5181#endif
5182}
5183
bea3348e 5184static int process_backlog(struct napi_struct *napi, int quota)
1da177e4 5185{
eecfd7c4 5186 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
145dd5f9
PA
5187 bool again = true;
5188 int work = 0;
1da177e4 5189
e326bed2
ED
 5190 /* Check if we have pending IPIs; it's better to send them now
 5191 * than to wait for net_rx_action() to end.
5192 */
d75b1ade 5193 if (sd_has_rps_ipi_waiting(sd)) {
e326bed2
ED
5194 local_irq_disable();
5195 net_rps_action_and_irq_enable(sd);
5196 }
d75b1ade 5197
3d48b53f 5198 napi->weight = dev_rx_weight;
145dd5f9 5199 while (again) {
1da177e4 5200 struct sk_buff *skb;
6e7676c1
CG
5201
5202 while ((skb = __skb_dequeue(&sd->process_queue))) {
2c17d27c 5203 rcu_read_lock();
6e7676c1 5204 __netif_receive_skb(skb);
2c17d27c 5205 rcu_read_unlock();
76cc8b13 5206 input_queue_head_incr(sd);
145dd5f9 5207 if (++work >= quota)
76cc8b13 5208 return work;
145dd5f9 5209
6e7676c1 5210 }
1da177e4 5211
145dd5f9 5212 local_irq_disable();
e36fa2f7 5213 rps_lock(sd);
11ef7a89 5214 if (skb_queue_empty(&sd->input_pkt_queue)) {
eecfd7c4
ED
5215 /*
5216 * Inline a custom version of __napi_complete().
 5217 * Only the current CPU owns and manipulates this napi,
11ef7a89
TH
5218 * and NAPI_STATE_SCHED is the only possible flag set
5219 * on backlog.
5220 * We can use a plain write instead of clear_bit(),
eecfd7c4
ED
 5221 * and we don't need an smp_mb() memory barrier.
5222 */
eecfd7c4 5223 napi->state = 0;
145dd5f9
PA
5224 again = false;
5225 } else {
5226 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5227 &sd->process_queue);
bea3348e 5228 }
e36fa2f7 5229 rps_unlock(sd);
145dd5f9 5230 local_irq_enable();
6e7676c1 5231 }
1da177e4 5232
bea3348e
SH
5233 return work;
5234}
1da177e4 5235
bea3348e
SH
5236/**
5237 * __napi_schedule - schedule for receive
c4ea43c5 5238 * @n: entry to schedule
bea3348e 5239 *
bc9ad166
ED
5240 * The entry's receive function will be scheduled to run.
5241 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
bea3348e 5242 */
b5606c2d 5243void __napi_schedule(struct napi_struct *n)
bea3348e
SH
5244{
5245 unsigned long flags;
1da177e4 5246
bea3348e 5247 local_irq_save(flags);
903ceff7 5248 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
bea3348e 5249 local_irq_restore(flags);
1da177e4 5250}
bea3348e
SH
5251EXPORT_SYMBOL(__napi_schedule);
5252
39e6c820
ED
5253/**
5254 * napi_schedule_prep - check if napi can be scheduled
5255 * @n: napi context
5256 *
5257 * Test if NAPI routine is already running, and if not mark
5258 * it as running. This is used as a condition variable
 5259 * to ensure only one NAPI poll instance runs. We also make
5260 * sure there is no pending NAPI disable.
5261 */
5262bool napi_schedule_prep(struct napi_struct *n)
5263{
5264 unsigned long val, new;
5265
5266 do {
5267 val = READ_ONCE(n->state);
5268 if (unlikely(val & NAPIF_STATE_DISABLE))
5269 return false;
5270 new = val | NAPIF_STATE_SCHED;
5271
5272 /* Sets STATE_MISSED bit if STATE_SCHED was already set
5273 * This was suggested by Alexander Duyck, as compiler
5274 * emits better code than :
5275 * if (val & NAPIF_STATE_SCHED)
5276 * new |= NAPIF_STATE_MISSED;
5277 */
5278 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5279 NAPIF_STATE_MISSED;
5280 } while (cmpxchg(&n->state, val, new) != val);
5281
5282 return !(val & NAPIF_STATE_SCHED);
5283}
5284EXPORT_SYMBOL(napi_schedule_prep);
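/* [Editorial sketch, not part of the kernel source] Why the divide/multiply
 * above is branch-free: with the upstream definitions (assumed here)
 * NAPIF_STATE_SCHED = BIT(0) and NAPIF_STATE_MISSED = BIT(1), the term
 * (val & SCHED) / SCHED evaluates to 0 or 1, so multiplying by MISSED
 * yields 0 or MISSED with no conditional jump. demo_missed_bit() is a
 * hypothetical name used only for this illustration.
 */
static inline unsigned long demo_missed_bit(unsigned long val)
{
	/* 0 when SCHED is clear, NAPIF_STATE_MISSED when SCHED is set */
	return (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
	       NAPIF_STATE_MISSED;
}
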
5285
bc9ad166
ED
5286/**
5287 * __napi_schedule_irqoff - schedule for receive
5288 * @n: entry to schedule
5289 *
5290 * Variant of __napi_schedule() assuming hard irqs are masked
5291 */
5292void __napi_schedule_irqoff(struct napi_struct *n)
5293{
5294 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5295}
5296EXPORT_SYMBOL(__napi_schedule_irqoff);
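/* [Editorial sketch, not from this file] The canonical hard-irq entry into
 * NAPI using the two halves defined above; napi_schedule() in netdevice.h
 * wraps a similar prep + schedule pair. foo_irq() is hypothetical.
 */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct napi_struct *napi = data;

	if (napi_schedule_prep(napi)) {
		/* a real driver would mask its device interrupts here */
		__napi_schedule_irqoff(napi);
	}
	return IRQ_HANDLED;
}
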
5297
364b6055 5298bool napi_complete_done(struct napi_struct *n, int work_done)
d565b0a1 5299{
39e6c820 5300 unsigned long flags, val, new;
d565b0a1
HX
5301
5302 /*
217f6974
ED
5303 * 1) Don't let napi dequeue from the cpu poll list
5304 * just in case it's running on a different CPU.
5305 * 2) If we are busy polling, do nothing here, we have
5306 * the guarantee we will be called later.
d565b0a1 5307 */
217f6974
ED
5308 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5309 NAPIF_STATE_IN_BUSY_POLL)))
364b6055 5310 return false;
d565b0a1 5311
3b47d303
ED
5312 if (n->gro_list) {
5313 unsigned long timeout = 0;
d75b1ade 5314
3b47d303
ED
5315 if (work_done)
5316 timeout = n->dev->gro_flush_timeout;
5317
5318 if (timeout)
5319 hrtimer_start(&n->timer, ns_to_ktime(timeout),
5320 HRTIMER_MODE_REL_PINNED);
5321 else
5322 napi_gro_flush(n, false);
5323 }
02c1602e 5324 if (unlikely(!list_empty(&n->poll_list))) {
d75b1ade
ED
5325 /* If n->poll_list is not empty, we need to mask irqs */
5326 local_irq_save(flags);
02c1602e 5327 list_del_init(&n->poll_list);
d75b1ade
ED
5328 local_irq_restore(flags);
5329 }
39e6c820
ED
5330
5331 do {
5332 val = READ_ONCE(n->state);
5333
5334 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5335
5336 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5337
5338 /* If STATE_MISSED was set, leave STATE_SCHED set,
5339 * because we will call napi->poll() one more time.
5340 * This C code was suggested by Alexander Duyck to help gcc.
5341 */
5342 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5343 NAPIF_STATE_SCHED;
5344 } while (cmpxchg(&n->state, val, new) != val);
5345
5346 if (unlikely(val & NAPIF_STATE_MISSED)) {
5347 __napi_schedule(n);
5348 return false;
5349 }
5350
364b6055 5351 return true;
d565b0a1 5352}
3b47d303 5353EXPORT_SYMBOL(napi_complete_done);
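/* [Editorial sketch] The driver-side poll pattern the bool return value
 * above is designed for: only re-enable device interrupts when
 * napi_complete_done() confirms the instance really is done (i.e. no
 * NAPIF_STATE_MISSED rescheduling happened). The foo_* helpers are
 * hypothetical.
 */
static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx(napi, budget);	/* hypothetical helper */

	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_unmask_irq(napi);			/* hypothetical helper */

	return work_done;
}
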
d565b0a1 5354
af12fa6e 5355/* must be called under rcu_read_lock(), as we don't take a reference */
02d62e86 5356static struct napi_struct *napi_by_id(unsigned int napi_id)
af12fa6e
ET
5357{
5358 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5359 struct napi_struct *napi;
5360
5361 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5362 if (napi->napi_id == napi_id)
5363 return napi;
5364
5365 return NULL;
5366}
02d62e86
ED
5367
5368#if defined(CONFIG_NET_RX_BUSY_POLL)
217f6974 5369
ce6aea93 5370#define BUSY_POLL_BUDGET 8
217f6974
ED
5371
5372static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5373{
5374 int rc;
5375
39e6c820
ED
5376 /* Busy polling means there is a high chance the device driver's hard irq
5377 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5378 * set in napi_schedule_prep().
5379 * Since we are about to call napi->poll() once more, we can safely
5380 * clear NAPI_STATE_MISSED.
5381 *
5382 * Note: x86 could use a single "lock and ..." instruction
5383 * to perform these two clear_bit() operations.
5384 */
5385 clear_bit(NAPI_STATE_MISSED, &napi->state);
217f6974
ED
5386 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
5387
5388 local_bh_disable();
5389
5390 /* All we really want here is to re-enable device interrupts.
5391 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5392 */
5393 rc = napi->poll(napi, BUSY_POLL_BUDGET);
1e22391e 5394 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
217f6974
ED
5395 netpoll_poll_unlock(have_poll_lock);
5396 if (rc == BUSY_POLL_BUDGET)
5397 __napi_schedule(napi);
5398 local_bh_enable();
217f6974
ED
5399}
5400
7db6b048
SS
5401void napi_busy_loop(unsigned int napi_id,
5402 bool (*loop_end)(void *, unsigned long),
5403 void *loop_end_arg)
02d62e86 5404{
7db6b048 5405 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
217f6974 5406 int (*napi_poll)(struct napi_struct *napi, int budget);
217f6974 5407 void *have_poll_lock = NULL;
02d62e86 5408 struct napi_struct *napi;
217f6974
ED
5409
5410restart:
217f6974 5411 napi_poll = NULL;
02d62e86 5412
2a028ecb 5413 rcu_read_lock();
02d62e86 5414
545cd5e5 5415 napi = napi_by_id(napi_id);
02d62e86
ED
5416 if (!napi)
5417 goto out;
5418
217f6974
ED
5419 preempt_disable();
5420 for (;;) {
2b5cd0df
AD
5421 int work = 0;
5422
2a028ecb 5423 local_bh_disable();
217f6974
ED
5424 if (!napi_poll) {
5425 unsigned long val = READ_ONCE(napi->state);
5426
5427 /* If multiple threads are competing for this napi,
5428 * we avoid dirtying napi->state as much as we can.
5429 */
5430 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
5431 NAPIF_STATE_IN_BUSY_POLL))
5432 goto count;
5433 if (cmpxchg(&napi->state, val,
5434 val | NAPIF_STATE_IN_BUSY_POLL |
5435 NAPIF_STATE_SCHED) != val)
5436 goto count;
5437 have_poll_lock = netpoll_poll_lock(napi);
5438 napi_poll = napi->poll;
5439 }
2b5cd0df
AD
5440 work = napi_poll(napi, BUSY_POLL_BUDGET);
5441 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
217f6974 5442count:
2b5cd0df 5443 if (work > 0)
7db6b048 5444 __NET_ADD_STATS(dev_net(napi->dev),
2b5cd0df 5445 LINUX_MIB_BUSYPOLLRXPACKETS, work);
2a028ecb 5446 local_bh_enable();
02d62e86 5447
7db6b048 5448 if (!loop_end || loop_end(loop_end_arg, start_time))
217f6974 5449 break;
02d62e86 5450
217f6974
ED
5451 if (unlikely(need_resched())) {
5452 if (napi_poll)
5453 busy_poll_stop(napi, have_poll_lock);
5454 preempt_enable();
5455 rcu_read_unlock();
5456 cond_resched();
7db6b048 5457 if (loop_end(loop_end_arg, start_time))
2b5cd0df 5458 return;
217f6974
ED
5459 goto restart;
5460 }
6cdf89b1 5461 cpu_relax();
217f6974
ED
5462 }
5463 if (napi_poll)
5464 busy_poll_stop(napi, have_poll_lock);
5465 preempt_enable();
02d62e86 5466out:
2a028ecb 5467 rcu_read_unlock();
02d62e86 5468}
7db6b048 5469EXPORT_SYMBOL(napi_busy_loop);
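/* [Editorial sketch] The shape of a loop_end callback for napi_busy_loop().
 * The in-tree user is sk_busy_loop() via sk_busy_loop_end(); this
 * hypothetical variant simply bounds the spin by time using
 * busy_loop_timeout() from <net/busy_poll.h>.
 */
static bool foo_loop_end(void *loop_end_arg, unsigned long start_time)
{
	return busy_loop_timeout(start_time);	/* true means: stop spinning */
}
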
02d62e86
ED
5470
5471#endif /* CONFIG_NET_RX_BUSY_POLL */
af12fa6e 5472
149d6ad8 5473static void napi_hash_add(struct napi_struct *napi)
af12fa6e 5474{
d64b5e85
ED
5475 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
5476 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
52bd2d62 5477 return;
af12fa6e 5478
52bd2d62 5479 spin_lock(&napi_hash_lock);
af12fa6e 5480
545cd5e5 5481 /* 0..NR_CPUS range is reserved for sender_cpu use */
52bd2d62 5482 do {
545cd5e5
AD
5483 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
5484 napi_gen_id = MIN_NAPI_ID;
52bd2d62
ED
5485 } while (napi_by_id(napi_gen_id));
5486 napi->napi_id = napi_gen_id;
af12fa6e 5487
52bd2d62
ED
5488 hlist_add_head_rcu(&napi->napi_hash_node,
5489 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
af12fa6e 5490
52bd2d62 5491 spin_unlock(&napi_hash_lock);
af12fa6e 5492}
af12fa6e
ET
5493
5494/* Warning: the caller is responsible for making sure an RCU grace period
5495 * has elapsed before freeing the memory containing @napi
5496 */
34cbe27e 5497bool napi_hash_del(struct napi_struct *napi)
af12fa6e 5498{
34cbe27e
ED
5499 bool rcu_sync_needed = false;
5500
af12fa6e
ET
5501 spin_lock(&napi_hash_lock);
5502
34cbe27e
ED
5503 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
5504 rcu_sync_needed = true;
af12fa6e 5505 hlist_del_rcu(&napi->napi_hash_node);
34cbe27e 5506 }
af12fa6e 5507 spin_unlock(&napi_hash_lock);
34cbe27e 5508 return rcu_sync_needed;
af12fa6e
ET
5509}
5510EXPORT_SYMBOL_GPL(napi_hash_del);
5511
3b47d303
ED
5512static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
5513{
5514 struct napi_struct *napi;
5515
5516 napi = container_of(timer, struct napi_struct, timer);
39e6c820
ED
5517
5518 * Note: we use a relaxed variant of napi_schedule_prep(), not setting
5519 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
5520 */
5521 if (napi->gro_list && !napi_disable_pending(napi) &&
5522 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
5523 __napi_schedule_irqoff(napi);
3b47d303
ED
5524
5525 return HRTIMER_NORESTART;
5526}
5527
d565b0a1
HX
5528void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5529 int (*poll)(struct napi_struct *, int), int weight)
5530{
5531 INIT_LIST_HEAD(&napi->poll_list);
3b47d303
ED
5532 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5533 napi->timer.function = napi_watchdog;
4ae5544f 5534 napi->gro_count = 0;
d565b0a1 5535 napi->gro_list = NULL;
5d38a079 5536 napi->skb = NULL;
d565b0a1 5537 napi->poll = poll;
82dc3c63
ED
5538 if (weight > NAPI_POLL_WEIGHT)
5539 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5540 weight, dev->name);
d565b0a1
HX
5541 napi->weight = weight;
5542 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 5543 napi->dev = dev;
5d38a079 5544#ifdef CONFIG_NETPOLL
d565b0a1
HX
5545 napi->poll_owner = -1;
5546#endif
5547 set_bit(NAPI_STATE_SCHED, &napi->state);
93d05d4a 5548 napi_hash_add(napi);
d565b0a1
HX
5549}
5550EXPORT_SYMBOL(netif_napi_add);
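/* [Editorial sketch] Typical driver-side registration. netif_napi_add()
 * leaves NAPI_STATE_SCHED set, so the instance stays inert until the driver
 * calls napi_enable(). struct foo_priv and foo_poll() (the driver's poll
 * callback, as sketched earlier) are hypothetical; NAPI_POLL_WEIGHT (64)
 * is the conventional weight.
 */
static void foo_register_napi(struct net_device *dev, struct foo_priv *priv)
{
	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);	/* clears NAPI_STATE_SCHED */
}
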
5551
3b47d303
ED
5552void napi_disable(struct napi_struct *n)
5553{
5554 might_sleep();
5555 set_bit(NAPI_STATE_DISABLE, &n->state);
5556
5557 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5558 msleep(1);
2d8bff12
NH
5559 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5560 msleep(1);
3b47d303
ED
5561
5562 hrtimer_cancel(&n->timer);
5563
5564 clear_bit(NAPI_STATE_DISABLE, &n->state);
5565}
5566EXPORT_SYMBOL(napi_disable);
5567
93d05d4a 5568/* Must be called in process context */
d565b0a1
HX
5569void netif_napi_del(struct napi_struct *napi)
5570{
93d05d4a
ED
5571 might_sleep();
5572 if (napi_hash_del(napi))
5573 synchronize_net();
d7b06636 5574 list_del_init(&napi->dev_list);
76620aaf 5575 napi_free_frags(napi);
d565b0a1 5576
289dccbe 5577 kfree_skb_list(napi->gro_list);
d565b0a1 5578 napi->gro_list = NULL;
4ae5544f 5579 napi->gro_count = 0;
d565b0a1
HX
5580}
5581EXPORT_SYMBOL(netif_napi_del);
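/* [Editorial sketch] The teardown ordering implied by the two helpers
 * above: disable first (sleeps until any in-flight poll finishes), then
 * delete. struct foo_priv is hypothetical.
 */
static void foo_unregister_napi(struct foo_priv *priv)
{
	napi_disable(&priv->napi);	/* may msleep(); process context only */
	netif_napi_del(&priv->napi);	/* unhashes and frees GRO state */
}
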
5582
726ce70e
HX
5583static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5584{
5585 void *have;
5586 int work, weight;
5587
5588 list_del_init(&n->poll_list);
5589
5590 have = netpoll_poll_lock(n);
5591
5592 weight = n->weight;
5593
5594 /* This NAPI_STATE_SCHED test is for avoiding a race
5595 * with netpoll's poll_napi(). Only the entity which
5596 * obtains the lock and sees NAPI_STATE_SCHED set will
5597 * actually make the ->poll() call. Therefore we avoid
5598 * accidentally calling ->poll() when NAPI is not scheduled.
5599 */
5600 work = 0;
5601 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5602 work = n->poll(n, weight);
1db19db7 5603 trace_napi_poll(n, work, weight);
726ce70e
HX
5604 }
5605
5606 WARN_ON_ONCE(work > weight);
5607
5608 if (likely(work < weight))
5609 goto out_unlock;
5610
5611 /* Drivers must not modify the NAPI state if they
5612 * consume the entire weight. In such cases this code
5613 * still "owns" the NAPI instance and therefore can
5614 * move the instance around on the list at-will.
5615 */
5616 if (unlikely(napi_disable_pending(n))) {
5617 napi_complete(n);
5618 goto out_unlock;
5619 }
5620
5621 if (n->gro_list) {
5622 /* Flush too-old packets.
5623 * If HZ < 1000, flush all packets.
5624 */
5625 napi_gro_flush(n, HZ >= 1000);
5626 }
5627
001ce546
HX
5628 /* Some drivers may have called napi_schedule
5629 * prior to exhausting their budget.
5630 */
5631 if (unlikely(!list_empty(&n->poll_list))) {
5632 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5633 n->dev ? n->dev->name : "backlog");
5634 goto out_unlock;
5635 }
5636
726ce70e
HX
5637 list_add_tail(&n->poll_list, repoll);
5638
5639out_unlock:
5640 netpoll_poll_unlock(have);
5641
5642 return work;
5643}
5644
0766f788 5645static __latent_entropy void net_rx_action(struct softirq_action *h)
1da177e4 5646{
903ceff7 5647 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
7acf8a1e
MW
5648 unsigned long time_limit = jiffies +
5649 usecs_to_jiffies(netdev_budget_usecs);
51b0bded 5650 int budget = netdev_budget;
d75b1ade
ED
5651 LIST_HEAD(list);
5652 LIST_HEAD(repoll);
53fb95d3 5653
1da177e4 5654 local_irq_disable();
d75b1ade
ED
5655 list_splice_init(&sd->poll_list, &list);
5656 local_irq_enable();
1da177e4 5657
ceb8d5bf 5658 for (;;) {
bea3348e 5659 struct napi_struct *n;
1da177e4 5660
ceb8d5bf
HX
5661 if (list_empty(&list)) {
5662 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
f52dffe0 5663 goto out;
ceb8d5bf
HX
5664 break;
5665 }
5666
6bd373eb
HX
5667 n = list_first_entry(&list, struct napi_struct, poll_list);
5668 budget -= napi_poll(n, &repoll);
5669
d75b1ade 5670 /* If the softirq window is exhausted then punt.
24f8b238
SH
5671 * Allow this to run for 2 jiffies, since that allows
5672 * an average latency of 1.5/HZ.
bea3348e 5673 */
ceb8d5bf
HX
5674 if (unlikely(budget <= 0 ||
5675 time_after_eq(jiffies, time_limit))) {
5676 sd->time_squeeze++;
5677 break;
5678 }
1da177e4 5679 }
d75b1ade 5680
d75b1ade
ED
5681 local_irq_disable();
5682
5683 list_splice_tail_init(&sd->poll_list, &list);
5684 list_splice_tail(&repoll, &list);
5685 list_splice(&list, &sd->poll_list);
5686 if (!list_empty(&sd->poll_list))
5687 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
5688
e326bed2 5689 net_rps_action_and_irq_enable(sd);
f52dffe0
ED
5690out:
5691 __kfree_skb_flush();
1da177e4
LT
5692}
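/* [Editorial note] Worked example of the two limits above, assuming the
 * usual defaults: netdev_budget_usecs = 2000 with HZ = 1000 makes
 * time_limit equal jiffies + 2 (the "2 jiffies" the loop comment refers
 * to), while netdev_budget = 300 independently caps the packets processed
 * per softirq invocation; whichever limit trips first ends the run and
 * bumps sd->time_squeeze.
 */
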
5693
aa9d8560 5694struct netdev_adjacent {
9ff162a8 5695 struct net_device *dev;
5d261913
VF
5696
5697 /* upper master flag, there can only be one master device per list */
9ff162a8 5698 bool master;
5d261913 5699
5d261913
VF
5700 /* counter for the number of times this device was added to us */
5701 u16 ref_nr;
5702
402dae96
VF
5703 /* private field for the users */
5704 void *private;
5705
9ff162a8
JP
5706 struct list_head list;
5707 struct rcu_head rcu;
9ff162a8
JP
5708};
5709
6ea29da1 5710static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
2f268f12 5711 struct list_head *adj_list)
9ff162a8 5712{
5d261913 5713 struct netdev_adjacent *adj;
5d261913 5714
2f268f12 5715 list_for_each_entry(adj, adj_list, list) {
5d261913
VF
5716 if (adj->dev == adj_dev)
5717 return adj;
9ff162a8
JP
5718 }
5719 return NULL;
5720}
5721
f1170fd4
DA
5722static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
5723{
5724 struct net_device *dev = data;
5725
5726 return upper_dev == dev;
5727}
5728
9ff162a8
JP
5729/**
5730 * netdev_has_upper_dev - Check if device is linked to an upper device
5731 * @dev: device
5732 * @upper_dev: upper device to check
5733 *
5734 * Find out if a device is linked to the specified upper device and return
5735 * true if it is. Note that this checks only the immediate upper device,
5736 * not the complete stack of devices. The caller must hold the RTNL lock.
5737 */
5738bool netdev_has_upper_dev(struct net_device *dev,
5739 struct net_device *upper_dev)
5740{
5741 ASSERT_RTNL();
5742
f1170fd4
DA
5743 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5744 upper_dev);
9ff162a8
JP
5745}
5746EXPORT_SYMBOL(netdev_has_upper_dev);
5747
1a3f060c
DA
5748/**
5749 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
5750 * @dev: device
5751 * @upper_dev: upper device to check
5752 *
5753 * Find out if a device is linked to the specified upper device and return
5754 * true if it is. Note that this checks the entire upper device chain.
5755 * The caller must hold the RCU read lock.
5756 */
5757
1a3f060c
DA
5758bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
5759 struct net_device *upper_dev)
5760{
5761 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5762 upper_dev);
5763}
5764EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
5765
9ff162a8
JP
5766/**
5767 * netdev_has_any_upper_dev - Check if device is linked to some device
5768 * @dev: device
5769 *
5770 * Find out if a device is linked to an upper device and return true in case
5771 * it is. The caller must hold the RTNL lock.
5772 */
25cc72a3 5773bool netdev_has_any_upper_dev(struct net_device *dev)
9ff162a8
JP
5774{
5775 ASSERT_RTNL();
5776
f1170fd4 5777 return !list_empty(&dev->adj_list.upper);
9ff162a8 5778}
25cc72a3 5779EXPORT_SYMBOL(netdev_has_any_upper_dev);
9ff162a8
JP
5780
5781/**
5782 * netdev_master_upper_dev_get - Get master upper device
5783 * @dev: device
5784 *
5785 * Find a master upper device and return pointer to it or NULL in case
5786 * it's not there. The caller must hold the RTNL lock.
5787 */
5788struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5789{
aa9d8560 5790 struct netdev_adjacent *upper;
9ff162a8
JP
5791
5792 ASSERT_RTNL();
5793
2f268f12 5794 if (list_empty(&dev->adj_list.upper))
9ff162a8
JP
5795 return NULL;
5796
2f268f12 5797 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 5798 struct netdev_adjacent, list);
9ff162a8
JP
5799 if (likely(upper->master))
5800 return upper->dev;
5801 return NULL;
5802}
5803EXPORT_SYMBOL(netdev_master_upper_dev_get);
5804
0f524a80
DA
5805/**
5806 * netdev_has_any_lower_dev - Check if device is linked to some device
5807 * @dev: device
5808 *
5809 * Find out if a device is linked to a lower device and return true in case
5810 * it is. The caller must hold the RTNL lock.
5811 */
5812static bool netdev_has_any_lower_dev(struct net_device *dev)
5813{
5814 ASSERT_RTNL();
5815
5816 return !list_empty(&dev->adj_list.lower);
5817}
5818
b6ccba4c
VF
5819void *netdev_adjacent_get_private(struct list_head *adj_list)
5820{
5821 struct netdev_adjacent *adj;
5822
5823 adj = list_entry(adj_list, struct netdev_adjacent, list);
5824
5825 return adj->private;
5826}
5827EXPORT_SYMBOL(netdev_adjacent_get_private);
5828
44a40855
VY
5829/**
5830 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5831 * @dev: device
5832 * @iter: list_head ** of the current position
5833 *
5834 * Gets the next device from the dev's upper list, starting from iter
5835 * position. The caller must hold RCU read lock.
5836 */
5837struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5838 struct list_head **iter)
5839{
5840 struct netdev_adjacent *upper;
5841
5842 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5843
5844 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5845
5846 if (&upper->list == &dev->adj_list.upper)
5847 return NULL;
5848
5849 *iter = &upper->list;
5850
5851 return upper->dev;
5852}
5853EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
5854
1a3f060c
DA
5855static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
5856 struct list_head **iter)
5857{
5858 struct netdev_adjacent *upper;
5859
5860 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5861
5862 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5863
5864 if (&upper->list == &dev->adj_list.upper)
5865 return NULL;
5866
5867 *iter = &upper->list;
5868
5869 return upper->dev;
5870}
5871
5872int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
5873 int (*fn)(struct net_device *dev,
5874 void *data),
5875 void *data)
5876{
5877 struct net_device *udev;
5878 struct list_head *iter;
5879 int ret;
5880
5881 for (iter = &dev->adj_list.upper,
5882 udev = netdev_next_upper_dev_rcu(dev, &iter);
5883 udev;
5884 udev = netdev_next_upper_dev_rcu(dev, &iter)) {
5885 /* first is the upper device itself */
5886 ret = fn(udev, data);
5887 if (ret)
5888 return ret;
5889
5890 /* then look at all of its upper devices */
5891 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
5892 if (ret)
5893 return ret;
5894 }
5895
5896 return 0;
5897}
5898EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
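/* [Editorial sketch] A walk callback matching the fn signature above; a
 * nonzero return stops the recursion early, which is how
 * __netdev_has_upper_dev() earlier in this file implements the check.
 * foo_count_uppers() is hypothetical.
 */
static int foo_count_uppers(struct net_device *upper_dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* 0 means: keep walking */
}
/* Usage (under rcu_read_lock()):
 *	int count = 0;
 *	netdev_walk_all_upper_dev_rcu(dev, foo_count_uppers, &count);
 */
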
5899
31088a11
VF
5900/**
5901 * netdev_lower_get_next_private - Get the next ->private from the
5902 * lower neighbour list
5903 * @dev: device
5904 * @iter: list_head ** of the current position
5905 *
5906 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5907 * list, starting from iter position. The caller must either hold the
5908 * RTNL lock or its own locking that guarantees that the neighbour lower
b469139e 5909 * list will remain unchanged.
31088a11
VF
5910 */
5911void *netdev_lower_get_next_private(struct net_device *dev,
5912 struct list_head **iter)
5913{
5914 struct netdev_adjacent *lower;
5915
5916 lower = list_entry(*iter, struct netdev_adjacent, list);
5917
5918 if (&lower->list == &dev->adj_list.lower)
5919 return NULL;
5920
6859e7df 5921 *iter = lower->list.next;
31088a11
VF
5922
5923 return lower->private;
5924}
5925EXPORT_SYMBOL(netdev_lower_get_next_private);
5926
5927/**
5928 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5929 * lower neighbour list, RCU
5930 * variant
5931 * @dev: device
5932 * @iter: list_head ** of the current position
5933 *
5934 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5935 * list, starting from iter position. The caller must hold the RCU read lock.
5936 */
5937void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5938 struct list_head **iter)
5939{
5940 struct netdev_adjacent *lower;
5941
5942 WARN_ON_ONCE(!rcu_read_lock_held());
5943
5944 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5945
5946 if (&lower->list == &dev->adj_list.lower)
5947 return NULL;
5948
6859e7df 5949 *iter = &lower->list;
31088a11
VF
5950
5951 return lower->private;
5952}
5953EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5954
4085ebe8
VY
5955/**
5956 * netdev_lower_get_next - Get the next device from the lower neighbour
5957 * list
5958 * @dev: device
5959 * @iter: list_head ** of the current position
5960 *
5961 * Gets the next netdev_adjacent from the dev's lower neighbour
5962 * list, starting from iter position. The caller must hold the RTNL lock or
5963 * its own locking that guarantees that the neighbour lower
b469139e 5964 * list will remain unchanged.
4085ebe8
VY
5965 */
5966void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5967{
5968 struct netdev_adjacent *lower;
5969
cfdd28be 5970 lower = list_entry(*iter, struct netdev_adjacent, list);
4085ebe8
VY
5971
5972 if (&lower->list == &dev->adj_list.lower)
5973 return NULL;
5974
cfdd28be 5975 *iter = lower->list.next;
4085ebe8
VY
5976
5977 return lower->dev;
5978}
5979EXPORT_SYMBOL(netdev_lower_get_next);
5980
1a3f060c
DA
5981static struct net_device *netdev_next_lower_dev(struct net_device *dev,
5982 struct list_head **iter)
5983{
5984 struct netdev_adjacent *lower;
5985
46b5ab1a 5986 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
1a3f060c
DA
5987
5988 if (&lower->list == &dev->adj_list.lower)
5989 return NULL;
5990
46b5ab1a 5991 *iter = &lower->list;
1a3f060c
DA
5992
5993 return lower->dev;
5994}
5995
5996int netdev_walk_all_lower_dev(struct net_device *dev,
5997 int (*fn)(struct net_device *dev,
5998 void *data),
5999 void *data)
6000{
6001 struct net_device *ldev;
6002 struct list_head *iter;
6003 int ret;
6004
6005 for (iter = &dev->adj_list.lower,
6006 ldev = netdev_next_lower_dev(dev, &iter);
6007 ldev;
6008 ldev = netdev_next_lower_dev(dev, &iter)) {
6009 /* first is the lower device itself */
6010 ret = fn(ldev, data);
6011 if (ret)
6012 return ret;
6013
6014 /* then look at all of its lower devices */
6015 ret = netdev_walk_all_lower_dev(ldev, fn, data);
6016 if (ret)
6017 return ret;
6018 }
6019
6020 return 0;
6021}
6022EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
6023
1a3f060c
DA
6024static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6025 struct list_head **iter)
6026{
6027 struct netdev_adjacent *lower;
6028
6029 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6030 if (&lower->list == &dev->adj_list.lower)
6031 return NULL;
6032
6033 *iter = &lower->list;
6034
6035 return lower->dev;
6036}
6037
6038int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
6039 int (*fn)(struct net_device *dev,
6040 void *data),
6041 void *data)
6042{
6043 struct net_device *ldev;
6044 struct list_head *iter;
6045 int ret;
6046
6047 for (iter = &dev->adj_list.lower,
6048 ldev = netdev_next_lower_dev_rcu(dev, &iter);
6049 ldev;
6050 ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
6051 /* first is the lower device itself */
6052 ret = fn(ldev, data);
6053 if (ret)
6054 return ret;
6055
6056 /* then look at all of its lower devices */
6057 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
6058 if (ret)
6059 return ret;
6060 }
6061
6062 return 0;
6063}
6064EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6065
e001bfad 6066/**
6067 * netdev_lower_get_first_private_rcu - Get the first ->private from the
6068 * lower neighbour list, RCU
6069 * variant
6070 * @dev: device
6071 *
6072 * Gets the first netdev_adjacent->private from the dev's lower neighbour
6073 * list. The caller must hold the RCU read lock.
6074 */
6075void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6076{
6077 struct netdev_adjacent *lower;
6078
6079 lower = list_first_or_null_rcu(&dev->adj_list.lower,
6080 struct netdev_adjacent, list);
6081 if (lower)
6082 return lower->private;
6083 return NULL;
6084}
6085EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6086
9ff162a8
JP
6087/**
6088 * netdev_master_upper_dev_get_rcu - Get master upper device
6089 * @dev: device
6090 *
6091 * Find a master upper device and return pointer to it or NULL in case
6092 * it's not there. The caller must hold the RCU read lock.
6093 */
6094struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6095{
aa9d8560 6096 struct netdev_adjacent *upper;
9ff162a8 6097
2f268f12 6098 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 6099 struct netdev_adjacent, list);
9ff162a8
JP
6100 if (upper && likely(upper->master))
6101 return upper->dev;
6102 return NULL;
6103}
6104EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
6105
0a59f3a9 6106static int netdev_adjacent_sysfs_add(struct net_device *dev,
3ee32707
VF
6107 struct net_device *adj_dev,
6108 struct list_head *dev_list)
6109{
6110 char linkname[IFNAMSIZ+7];
f4563a75 6111
3ee32707
VF
6112 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6113 "upper_%s" : "lower_%s", adj_dev->name);
6114 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6115 linkname);
6116}
0a59f3a9 6117static void netdev_adjacent_sysfs_del(struct net_device *dev,
3ee32707
VF
6118 char *name,
6119 struct list_head *dev_list)
6120{
6121 char linkname[IFNAMSIZ+7];
f4563a75 6122
3ee32707
VF
6123 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6124 "upper_%s" : "lower_%s", name);
6125 sysfs_remove_link(&(dev->dev.kobj), linkname);
6126}
6127
7ce64c79
AF
6128static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6129 struct net_device *adj_dev,
6130 struct list_head *dev_list)
6131{
6132 return (dev_list == &dev->adj_list.upper ||
6133 dev_list == &dev->adj_list.lower) &&
6134 net_eq(dev_net(dev), dev_net(adj_dev));
6135}
3ee32707 6136
5d261913
VF
6137static int __netdev_adjacent_dev_insert(struct net_device *dev,
6138 struct net_device *adj_dev,
7863c054 6139 struct list_head *dev_list,
402dae96 6140 void *private, bool master)
5d261913
VF
6141{
6142 struct netdev_adjacent *adj;
842d67a7 6143 int ret;
5d261913 6144
6ea29da1 6145 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913
VF
6146
6147 if (adj) {
790510d9 6148 adj->ref_nr += 1;
67b62f98
DA
6149 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6150 dev->name, adj_dev->name, adj->ref_nr);
6151
5d261913
VF
6152 return 0;
6153 }
6154
6155 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6156 if (!adj)
6157 return -ENOMEM;
6158
6159 adj->dev = adj_dev;
6160 adj->master = master;
790510d9 6161 adj->ref_nr = 1;
402dae96 6162 adj->private = private;
5d261913 6163 dev_hold(adj_dev);
2f268f12 6164
67b62f98
DA
6165 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6166 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
5d261913 6167
7ce64c79 6168 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
3ee32707 6169 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5831d66e
VF
6170 if (ret)
6171 goto free_adj;
6172 }
6173
7863c054 6174 /* Ensure that master link is always the first item in list. */
842d67a7
VF
6175 if (master) {
6176 ret = sysfs_create_link(&(dev->dev.kobj),
6177 &(adj_dev->dev.kobj), "master");
6178 if (ret)
5831d66e 6179 goto remove_symlinks;
842d67a7 6180
7863c054 6181 list_add_rcu(&adj->list, dev_list);
842d67a7 6182 } else {
7863c054 6183 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 6184 }
5d261913
VF
6185
6186 return 0;
842d67a7 6187
5831d66e 6188remove_symlinks:
7ce64c79 6189 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 6190 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
842d67a7
VF
6191free_adj:
6192 kfree(adj);
974daef7 6193 dev_put(adj_dev);
842d67a7
VF
6194
6195 return ret;
5d261913
VF
6196}
6197
1d143d9f 6198static void __netdev_adjacent_dev_remove(struct net_device *dev,
6199 struct net_device *adj_dev,
93409033 6200 u16 ref_nr,
1d143d9f 6201 struct list_head *dev_list)
5d261913
VF
6202{
6203 struct netdev_adjacent *adj;
6204
67b62f98
DA
6205 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6206 dev->name, adj_dev->name, ref_nr);
6207
6ea29da1 6208 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913 6209
2f268f12 6210 if (!adj) {
67b62f98 6211 pr_err("Adjacency does not exist for device %s from %s\n",
2f268f12 6212 dev->name, adj_dev->name);
67b62f98
DA
6213 WARN_ON(1);
6214 return;
2f268f12 6215 }
5d261913 6216
93409033 6217 if (adj->ref_nr > ref_nr) {
67b62f98
DA
6218 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6219 dev->name, adj_dev->name, ref_nr,
6220 adj->ref_nr - ref_nr);
93409033 6221 adj->ref_nr -= ref_nr;
5d261913
VF
6222 return;
6223 }
6224
842d67a7
VF
6225 if (adj->master)
6226 sysfs_remove_link(&(dev->dev.kobj), "master");
6227
7ce64c79 6228 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 6229 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5831d66e 6230
5d261913 6231 list_del_rcu(&adj->list);
67b62f98 6232 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
2f268f12 6233 adj_dev->name, dev->name, adj_dev->name);
5d261913
VF
6234 dev_put(adj_dev);
6235 kfree_rcu(adj, rcu);
6236}
6237
1d143d9f 6238static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6239 struct net_device *upper_dev,
6240 struct list_head *up_list,
6241 struct list_head *down_list,
6242 void *private, bool master)
5d261913
VF
6243{
6244 int ret;
6245
790510d9 6246 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
93409033 6247 private, master);
5d261913
VF
6248 if (ret)
6249 return ret;
6250
790510d9 6251 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
93409033 6252 private, false);
5d261913 6253 if (ret) {
790510d9 6254 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
5d261913
VF
6255 return ret;
6256 }
6257
6258 return 0;
6259}
6260
1d143d9f 6261static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
6262 struct net_device *upper_dev,
93409033 6263 u16 ref_nr,
1d143d9f 6264 struct list_head *up_list,
6265 struct list_head *down_list)
5d261913 6266{
93409033
AC
6267 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
6268 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
5d261913
VF
6269}
6270
1d143d9f 6271static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
6272 struct net_device *upper_dev,
6273 void *private, bool master)
2f268f12 6274{
f1170fd4
DA
6275 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
6276 &dev->adj_list.upper,
6277 &upper_dev->adj_list.lower,
6278 private, master);
5d261913
VF
6279}
6280
1d143d9f 6281static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
6282 struct net_device *upper_dev)
2f268f12 6283{
93409033 6284 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
2f268f12
VF
6285 &dev->adj_list.upper,
6286 &upper_dev->adj_list.lower);
6287}
5d261913 6288
9ff162a8 6289static int __netdev_upper_dev_link(struct net_device *dev,
402dae96 6290 struct net_device *upper_dev, bool master,
42ab19ee
DA
6291 void *upper_priv, void *upper_info,
6292 struct netlink_ext_ack *extack)
9ff162a8 6293{
51d0c047
DA
6294 struct netdev_notifier_changeupper_info changeupper_info = {
6295 .info = {
6296 .dev = dev,
42ab19ee 6297 .extack = extack,
51d0c047
DA
6298 },
6299 .upper_dev = upper_dev,
6300 .master = master,
6301 .linking = true,
6302 .upper_info = upper_info,
6303 };
5d261913 6304 int ret = 0;
9ff162a8
JP
6305
6306 ASSERT_RTNL();
6307
6308 if (dev == upper_dev)
6309 return -EBUSY;
6310
6311 /* To prevent loops, check if dev is not upper device to upper_dev. */
f1170fd4 6312 if (netdev_has_upper_dev(upper_dev, dev))
9ff162a8
JP
6313 return -EBUSY;
6314
f1170fd4 6315 if (netdev_has_upper_dev(dev, upper_dev))
9ff162a8
JP
6316 return -EEXIST;
6317
6318 if (master && netdev_master_upper_dev_get(dev))
6319 return -EBUSY;
6320
51d0c047 6321 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
573c7ba0
JP
6322 &changeupper_info.info);
6323 ret = notifier_to_errno(ret);
6324 if (ret)
6325 return ret;
6326
6dffb044 6327 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
402dae96 6328 master);
5d261913
VF
6329 if (ret)
6330 return ret;
9ff162a8 6331
51d0c047 6332 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
b03804e7
IS
6333 &changeupper_info.info);
6334 ret = notifier_to_errno(ret);
6335 if (ret)
f1170fd4 6336 goto rollback;
b03804e7 6337
9ff162a8 6338 return 0;
5d261913 6339
f1170fd4 6340rollback:
2f268f12 6341 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
6342
6343 return ret;
9ff162a8
JP
6344}
6345
6346/**
6347 * netdev_upper_dev_link - Add a link to the upper device
6348 * @dev: device
6349 * @upper_dev: new upper device
6350 *
6351 * Adds a link to device which is upper to this one. The caller must hold
6352 * the RTNL lock. On a failure a negative errno code is returned.
6353 * On success the reference counts are adjusted and the function
6354 * returns zero.
6355 */
6356int netdev_upper_dev_link(struct net_device *dev,
42ab19ee
DA
6357 struct net_device *upper_dev,
6358 struct netlink_ext_ack *extack)
9ff162a8 6359{
42ab19ee
DA
6360 return __netdev_upper_dev_link(dev, upper_dev, false,
6361 NULL, NULL, extack);
9ff162a8
JP
6362}
6363EXPORT_SYMBOL(netdev_upper_dev_link);
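/* [Editorial sketch] How a stacking driver (vlan, bond, team, ...) might
 * tie itself to a lower device with the helper above. All foo_* names are
 * hypothetical; extack may be NULL when there is no netlink request to
 * report errors back to.
 */
static int foo_link_to_lower(struct net_device *foo_dev,
			     struct net_device *lower_dev,
			     struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	return netdev_upper_dev_link(lower_dev, foo_dev, extack);
}
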
6364
6365/**
6366 * netdev_master_upper_dev_link - Add a master link to the upper device
6367 * @dev: device
6368 * @upper_dev: new upper device
6dffb044 6369 * @upper_priv: upper device private
29bf24af 6370 * @upper_info: upper info to be passed down via notifier
9ff162a8
JP
6371 *
6372 * Adds a link to device which is upper to this one. In this case, only
6373 * one master upper device can be linked, although other non-master devices
6374 * might be linked as well. The caller must hold the RTNL lock.
6375 * On a failure a negative errno code is returned. On success the reference
6376 * counts are adjusted and the function returns zero.
6377 */
6378int netdev_master_upper_dev_link(struct net_device *dev,
6dffb044 6379 struct net_device *upper_dev,
42ab19ee
DA
6380 void *upper_priv, void *upper_info,
6381 struct netlink_ext_ack *extack)
9ff162a8 6382{
29bf24af 6383 return __netdev_upper_dev_link(dev, upper_dev, true,
42ab19ee 6384 upper_priv, upper_info, extack);
9ff162a8
JP
6385}
6386EXPORT_SYMBOL(netdev_master_upper_dev_link);
6387
6388/**
6389 * netdev_upper_dev_unlink - Removes a link to upper device
6390 * @dev: device
6391 * @upper_dev: new upper device
6392 *
6393 * Removes a link to device which is upper to this one. The caller must hold
6394 * the RTNL lock.
6395 */
6396void netdev_upper_dev_unlink(struct net_device *dev,
6397 struct net_device *upper_dev)
6398{
51d0c047
DA
6399 struct netdev_notifier_changeupper_info changeupper_info = {
6400 .info = {
6401 .dev = dev,
6402 },
6403 .upper_dev = upper_dev,
6404 .linking = false,
6405 };
f4563a75 6406
9ff162a8
JP
6407 ASSERT_RTNL();
6408
0e4ead9d 6409 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
0e4ead9d 6410
51d0c047 6411 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
573c7ba0
JP
6412 &changeupper_info.info);
6413
2f268f12 6414 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913 6415
51d0c047 6416 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
0e4ead9d 6417 &changeupper_info.info);
9ff162a8
JP
6418}
6419EXPORT_SYMBOL(netdev_upper_dev_unlink);
6420
61bd3857
MS
6421/**
6422 * netdev_bonding_info_change - Dispatch event about slave change
6423 * @dev: device
4a26e453 6424 * @bonding_info: info to dispatch
61bd3857
MS
6425 *
6426 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
6427 * The caller must hold the RTNL lock.
6428 */
6429void netdev_bonding_info_change(struct net_device *dev,
6430 struct netdev_bonding_info *bonding_info)
6431{
51d0c047
DA
6432 struct netdev_notifier_bonding_info info = {
6433 .info.dev = dev,
6434 };
61bd3857
MS
6435
6436 memcpy(&info.bonding_info, bonding_info,
6437 sizeof(struct netdev_bonding_info));
51d0c047 6438 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
61bd3857
MS
6439 &info.info);
6440}
6441EXPORT_SYMBOL(netdev_bonding_info_change);
6442
2ce1ee17 6443static void netdev_adjacent_add_links(struct net_device *dev)
4c75431a
AF
6444{
6445 struct netdev_adjacent *iter;
6446
6447 struct net *net = dev_net(dev);
6448
6449 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6450 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6451 continue;
6452 netdev_adjacent_sysfs_add(iter->dev, dev,
6453 &iter->dev->adj_list.lower);
6454 netdev_adjacent_sysfs_add(dev, iter->dev,
6455 &dev->adj_list.upper);
6456 }
6457
6458 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6459 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6460 continue;
6461 netdev_adjacent_sysfs_add(iter->dev, dev,
6462 &iter->dev->adj_list.upper);
6463 netdev_adjacent_sysfs_add(dev, iter->dev,
6464 &dev->adj_list.lower);
6465 }
6466}
6467
2ce1ee17 6468static void netdev_adjacent_del_links(struct net_device *dev)
4c75431a
AF
6469{
6470 struct netdev_adjacent *iter;
6471
6472 struct net *net = dev_net(dev);
6473
6474 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6475 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6476 continue;
6477 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6478 &iter->dev->adj_list.lower);
6479 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6480 &dev->adj_list.upper);
6481 }
6482
6483 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6484 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6485 continue;
6486 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6487 &iter->dev->adj_list.upper);
6488 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6489 &dev->adj_list.lower);
6490 }
6491}
6492
5bb025fa 6493void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
402dae96 6494{
5bb025fa 6495 struct netdev_adjacent *iter;
402dae96 6496
4c75431a
AF
6497 struct net *net = dev_net(dev);
6498
5bb025fa 6499 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6500 if (!net_eq(net, dev_net(iter->dev)))
4c75431a 6501 continue;
5bb025fa
VF
6502 netdev_adjacent_sysfs_del(iter->dev, oldname,
6503 &iter->dev->adj_list.lower);
6504 netdev_adjacent_sysfs_add(iter->dev, dev,
6505 &iter->dev->adj_list.lower);
6506 }
402dae96 6507
5bb025fa 6508 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6509 if (!net_eq(net, dev_net(iter->dev)))
4c75431a 6510 continue;
5bb025fa
VF
6511 netdev_adjacent_sysfs_del(iter->dev, oldname,
6512 &iter->dev->adj_list.upper);
6513 netdev_adjacent_sysfs_add(iter->dev, dev,
6514 &iter->dev->adj_list.upper);
6515 }
402dae96 6516}
402dae96
VF
6517
6518void *netdev_lower_dev_get_private(struct net_device *dev,
6519 struct net_device *lower_dev)
6520{
6521 struct netdev_adjacent *lower;
6522
6523 if (!lower_dev)
6524 return NULL;
6ea29da1 6525 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
402dae96
VF
6526 if (!lower)
6527 return NULL;
6528
6529 return lower->private;
6530}
6531EXPORT_SYMBOL(netdev_lower_dev_get_private);
6532
4085ebe8 6533
952fcfd0 6534int dev_get_nest_level(struct net_device *dev)
4085ebe8
VY
6535{
6536 struct net_device *lower = NULL;
6537 struct list_head *iter;
6538 int max_nest = -1;
6539 int nest;
6540
6541 ASSERT_RTNL();
6542
6543 netdev_for_each_lower_dev(dev, lower, iter) {
952fcfd0 6544 nest = dev_get_nest_level(lower);
4085ebe8
VY
6545 if (max_nest < nest)
6546 max_nest = nest;
6547 }
6548
952fcfd0 6549 return max_nest + 1;
4085ebe8
VY
6550}
6551EXPORT_SYMBOL(dev_get_nest_level);
6552
04d48266
JP
6553/**
6554 * netdev_lower_state_changed - Dispatch event about lower device state change
6555 * @lower_dev: device
6556 * @lower_state_info: state to dispatch
6557 *
6558 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
6559 * The caller must hold the RTNL lock.
6560 */
6561void netdev_lower_state_changed(struct net_device *lower_dev,
6562 void *lower_state_info)
6563{
51d0c047
DA
6564 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
6565 .info.dev = lower_dev,
6566 };
04d48266
JP
6567
6568 ASSERT_RTNL();
6569 changelowerstate_info.lower_state_info = lower_state_info;
51d0c047 6570 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
04d48266
JP
6571 &changelowerstate_info.info);
6572}
6573EXPORT_SYMBOL(netdev_lower_state_changed);
6574
b6c40d68
PM
6575static void dev_change_rx_flags(struct net_device *dev, int flags)
6576{
d314774c
SH
6577 const struct net_device_ops *ops = dev->netdev_ops;
6578
d2615bf4 6579 if (ops->ndo_change_rx_flags)
d314774c 6580 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
6581}
6582
991fb3f7 6583static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
1da177e4 6584{
b536db93 6585 unsigned int old_flags = dev->flags;
d04a48b0
EB
6586 kuid_t uid;
6587 kgid_t gid;
1da177e4 6588
24023451
PM
6589 ASSERT_RTNL();
6590
dad9b335
WC
6591 dev->flags |= IFF_PROMISC;
6592 dev->promiscuity += inc;
6593 if (dev->promiscuity == 0) {
6594 /*
6595 * Avoid overflow.
6596 * If inc causes overflow, untouch promisc and return error.
6597 */
6598 if (inc < 0)
6599 dev->flags &= ~IFF_PROMISC;
6600 else {
6601 dev->promiscuity -= inc;
7b6cd1ce
JP
6602 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6603 dev->name);
dad9b335
WC
6604 return -EOVERFLOW;
6605 }
6606 }
52609c0b 6607 if (dev->flags != old_flags) {
7b6cd1ce
JP
6608 pr_info("device %s %s promiscuous mode\n",
6609 dev->name,
6610 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
6611 if (audit_enabled) {
6612 current_uid_gid(&uid, &gid);
7759db82
KHK
6613 audit_log(current->audit_context, GFP_ATOMIC,
6614 AUDIT_ANOM_PROMISCUOUS,
6615 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6616 dev->name, (dev->flags & IFF_PROMISC),
6617 (old_flags & IFF_PROMISC),
e1760bd5 6618 from_kuid(&init_user_ns, audit_get_loginuid(current)),
d04a48b0
EB
6619 from_kuid(&init_user_ns, uid),
6620 from_kgid(&init_user_ns, gid),
7759db82 6621 audit_get_sessionid(current));
8192b0c4 6622 }
24023451 6623
b6c40d68 6624 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 6625 }
991fb3f7
ND
6626 if (notify)
6627 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
dad9b335 6628 return 0;
1da177e4
LT
6629}
6630
4417da66
PM
6631/**
6632 * dev_set_promiscuity - update promiscuity count on a device
6633 * @dev: device
6634 * @inc: modifier
6635 *
6636 * Add or remove promiscuity from a device. While the count in the device
6637 * remains above zero the interface remains promiscuous. Once it hits zero
6638 * the device reverts back to normal filtering operation. A negative inc
6639 * value is used to drop promiscuity on the device.
dad9b335 6640 * Return 0 if successful or a negative errno code on error.
4417da66 6641 */
dad9b335 6642int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 6643{
b536db93 6644 unsigned int old_flags = dev->flags;
dad9b335 6645 int err;
4417da66 6646
991fb3f7 6647 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 6648 if (err < 0)
dad9b335 6649 return err;
4417da66
PM
6650 if (dev->flags != old_flags)
6651 dev_set_rx_mode(dev);
dad9b335 6652 return err;
4417da66 6653}
d1b19dff 6654EXPORT_SYMBOL(dev_set_promiscuity);
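/* [Editorial sketch] The counting contract above means every +1 must be
 * paired with a later -1, e.g. bracketing a packet-capture session. RTNL
 * must be held; the foo_* names are hypothetical.
 */
static int foo_capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* may fail, e.g. -EOVERFLOW */
}

static void foo_capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* drop our reference */
}
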
4417da66 6655
991fb3f7 6656static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 6657{
991fb3f7 6658 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 6659
24023451
PM
6660 ASSERT_RTNL();
6661
1da177e4 6662 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
6663 dev->allmulti += inc;
6664 if (dev->allmulti == 0) {
6665 /*
6666 * Avoid overflow.
6667 * If inc causes overflow, untouch allmulti and return error.
6668 */
6669 if (inc < 0)
6670 dev->flags &= ~IFF_ALLMULTI;
6671 else {
6672 dev->allmulti -= inc;
7b6cd1ce
JP
6673 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6674 dev->name);
dad9b335
WC
6675 return -EOVERFLOW;
6676 }
6677 }
24023451 6678 if (dev->flags ^ old_flags) {
b6c40d68 6679 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 6680 dev_set_rx_mode(dev);
991fb3f7
ND
6681 if (notify)
6682 __dev_notify_flags(dev, old_flags,
6683 dev->gflags ^ old_gflags);
24023451 6684 }
dad9b335 6685 return 0;
4417da66 6686}
991fb3f7
ND
6687
6688/**
6689 * dev_set_allmulti - update allmulti count on a device
6690 * @dev: device
6691 * @inc: modifier
6692 *
6693 * Add or remove reception of all multicast frames to a device. While the
6694 * count in the device remains above zero the interface continues to
6695 * receive all multicast frames. Once it hits zero the device reverts to normal
6696 * filtering operation. A negative @inc value is used to drop the counter
6697 * when releasing a resource needing all multicasts.
6698 * Return 0 if successful or a negative errno code on error.
6699 */
6700
6701int dev_set_allmulti(struct net_device *dev, int inc)
6702{
6703 return __dev_set_allmulti(dev, inc, true);
6704}
d1b19dff 6705EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
6706
6707/*
6708 * Upload unicast and multicast address lists to device and
6709 * configure RX filtering. When the device doesn't support unicast
53ccaae1 6710 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
6711 * are present.
6712 */
6713void __dev_set_rx_mode(struct net_device *dev)
6714{
d314774c
SH
6715 const struct net_device_ops *ops = dev->netdev_ops;
6716
4417da66
PM
6717 /* dev_open will call this function so the list will stay sane. */
6718 if (!(dev->flags&IFF_UP))
6719 return;
6720
6721 if (!netif_device_present(dev))
40b77c94 6722 return;
4417da66 6723
01789349 6724 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
6725 /* Unicast addresses changes may only happen under the rtnl,
6726 * therefore calling __dev_set_promiscuity here is safe.
6727 */
32e7bfc4 6728 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 6729 __dev_set_promiscuity(dev, 1, false);
2d348d1f 6730 dev->uc_promisc = true;
32e7bfc4 6731 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 6732 __dev_set_promiscuity(dev, -1, false);
2d348d1f 6733 dev->uc_promisc = false;
4417da66 6734 }
4417da66 6735 }
01789349
JP
6736
6737 if (ops->ndo_set_rx_mode)
6738 ops->ndo_set_rx_mode(dev);
4417da66
PM
6739}
6740
6741void dev_set_rx_mode(struct net_device *dev)
6742{
b9e40857 6743 netif_addr_lock_bh(dev);
4417da66 6744 __dev_set_rx_mode(dev);
b9e40857 6745 netif_addr_unlock_bh(dev);
1da177e4
LT
6746}
6747
f0db275a
SH
6748/**
6749 * dev_get_flags - get flags reported to userspace
6750 * @dev: device
6751 *
6752 * Get the combination of flag bits exported through APIs to userspace.
6753 */
95c96174 6754unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 6755{
95c96174 6756 unsigned int flags;
1da177e4
LT
6757
6758 flags = (dev->flags & ~(IFF_PROMISC |
6759 IFF_ALLMULTI |
b00055aa
SR
6760 IFF_RUNNING |
6761 IFF_LOWER_UP |
6762 IFF_DORMANT)) |
1da177e4
LT
6763 (dev->gflags & (IFF_PROMISC |
6764 IFF_ALLMULTI));
6765
b00055aa
SR
6766 if (netif_running(dev)) {
6767 if (netif_oper_up(dev))
6768 flags |= IFF_RUNNING;
6769 if (netif_carrier_ok(dev))
6770 flags |= IFF_LOWER_UP;
6771 if (netif_dormant(dev))
6772 flags |= IFF_DORMANT;
6773 }
1da177e4
LT
6774
6775 return flags;
6776}
d1b19dff 6777EXPORT_SYMBOL(dev_get_flags);
1da177e4 6778
bd380811 6779int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 6780{
b536db93 6781 unsigned int old_flags = dev->flags;
bd380811 6782 int ret;
1da177e4 6783
24023451
PM
6784 ASSERT_RTNL();
6785
1da177e4
LT
6786 /*
6787 * Set the flags on our device.
6788 */
6789
6790 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6791 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6792 IFF_AUTOMEDIA)) |
6793 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6794 IFF_ALLMULTI));
6795
6796 /*
6797 * Load in the correct multicast list now the flags have changed.
6798 */
6799
b6c40d68
PM
6800 if ((old_flags ^ flags) & IFF_MULTICAST)
6801 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 6802
4417da66 6803 dev_set_rx_mode(dev);
1da177e4
LT
6804
6805 /*
6806 * Have we downed the interface? We handle IFF_UP ourselves
6807 * according to user attempts to set it, rather than blindly
6808 * setting it.
6809 */
6810
6811 ret = 0;
7051b88a 6812 if ((old_flags ^ flags) & IFF_UP) {
6813 if (old_flags & IFF_UP)
6814 __dev_close(dev);
6815 else
6816 ret = __dev_open(dev);
6817 }
1da177e4 6818
1da177e4 6819 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 6820 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 6821 unsigned int old_flags = dev->flags;
d1b19dff 6822
1da177e4 6823 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
6824
6825 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6826 if (dev->flags != old_flags)
6827 dev_set_rx_mode(dev);
1da177e4
LT
6828 }
6829
6830 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
eb13da1a 6831 * is important. Some (broken) drivers set IFF_PROMISC when
6832 * IFF_ALLMULTI is requested, without asking us and without reporting.
1da177e4
LT
6833 */
6834 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
6835 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6836
1da177e4 6837 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 6838 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
6839 }
6840
bd380811
PM
6841 return ret;
6842}
6843
a528c219
ND
6844void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6845 unsigned int gchanges)
bd380811
PM
6846{
6847 unsigned int changes = dev->flags ^ old_flags;
6848
a528c219 6849 if (gchanges)
7f294054 6850 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 6851
bd380811
PM
6852 if (changes & IFF_UP) {
6853 if (dev->flags & IFF_UP)
6854 call_netdevice_notifiers(NETDEV_UP, dev);
6855 else
6856 call_netdevice_notifiers(NETDEV_DOWN, dev);
6857 }
6858
6859 if (dev->flags & IFF_UP &&
be9efd36 6860 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
51d0c047
DA
6861 struct netdev_notifier_change_info change_info = {
6862 .info = {
6863 .dev = dev,
6864 },
6865 .flags_changed = changes,
6866 };
be9efd36 6867
51d0c047 6868 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
be9efd36 6869 }
bd380811
PM
6870}
6871
6872/**
6873 * dev_change_flags - change device settings
6874 * @dev: device
6875 * @flags: device state flags
6876 *
6877 * Change settings on a device based on state flags. The flags are
6878 * in the userspace exported format.
6879 */
b536db93 6880int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 6881{
b536db93 6882 int ret;
991fb3f7 6883 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811
PM
6884
6885 ret = __dev_change_flags(dev, flags);
6886 if (ret < 0)
6887 return ret;
6888
991fb3f7 6889 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 6890 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
6891 return ret;
6892}
d1b19dff 6893EXPORT_SYMBOL(dev_change_flags);
1da177e4 6894
f51048c3 6895int __dev_set_mtu(struct net_device *dev, int new_mtu)
2315dc91
VF
6896{
6897 const struct net_device_ops *ops = dev->netdev_ops;
6898
6899 if (ops->ndo_change_mtu)
6900 return ops->ndo_change_mtu(dev, new_mtu);
6901
6902 dev->mtu = new_mtu;
6903 return 0;
6904}
f51048c3 6905EXPORT_SYMBOL(__dev_set_mtu);
2315dc91 6906
f0db275a
SH
6907/**
6908 * dev_set_mtu - Change maximum transfer unit
6909 * @dev: device
6910 * @new_mtu: new transfer unit
6911 *
6912 * Change the maximum transfer size of the network device.
6913 */
1da177e4
LT
6914int dev_set_mtu(struct net_device *dev, int new_mtu)
6915{
2315dc91 6916 int err, orig_mtu;
1da177e4
LT
6917
6918 if (new_mtu == dev->mtu)
6919 return 0;
6920
61e84623
JW
6921 /* MTU must be positive, and in range */
6922 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
6923 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
6924 dev->name, new_mtu, dev->min_mtu);
1da177e4 6925 return -EINVAL;
61e84623
JW
6926 }
6927
6928 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
6929 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
a0e65de7 6930 dev->name, new_mtu, dev->max_mtu);
61e84623
JW
6931 return -EINVAL;
6932 }
1da177e4
LT
6933
6934 if (!netif_device_present(dev))
6935 return -ENODEV;
6936
1d486bfb
VF
6937 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6938 err = notifier_to_errno(err);
6939 if (err)
6940 return err;
d314774c 6941
2315dc91
VF
6942 orig_mtu = dev->mtu;
6943 err = __dev_set_mtu(dev, new_mtu);
d314774c 6944
2315dc91
VF
6945 if (!err) {
6946 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6947 err = notifier_to_errno(err);
6948 if (err) {
6949 /* setting mtu back and notifying everyone again,
6950 * so that they have a chance to revert changes.
6951 */
6952 __dev_set_mtu(dev, orig_mtu);
6953 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6954 }
6955 }
1da177e4
LT
6956 return err;
6957}
d1b19dff 6958EXPORT_SYMBOL(dev_set_mtu);
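/* [Editorial sketch] Callers just pass the desired value: the range check
 * against dev->min_mtu/dev->max_mtu and the NETDEV_PRECHANGEMTU veto both
 * happen inside dev_set_mtu(). RTNL held; 9000 is an arbitrary jumbo-frame
 * value and foo_enable_jumbo() is hypothetical.
 */
static int foo_enable_jumbo(struct net_device *dev)
{
	int err = dev_set_mtu(dev, 9000);

	if (err)
		netdev_err(dev, "failed to set jumbo MTU: %d\n", err);
	return err;
}
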
1da177e4 6959
cbda10fa
VD
6960/**
6961 * dev_set_group - Change group this device belongs to
6962 * @dev: device
6963 * @new_group: group this device should belong to
6964 */
6965void dev_set_group(struct net_device *dev, int new_group)
6966{
6967 dev->group = new_group;
6968}
6969EXPORT_SYMBOL(dev_set_group);
6970
f0db275a
SH
6971/**
6972 * dev_set_mac_address - Change Media Access Control Address
6973 * @dev: device
6974 * @sa: new address
6975 *
6976 * Change the hardware (MAC) address of the device
6977 */
1da177e4
LT
6978int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6979{
d314774c 6980 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
6981 int err;
6982
d314774c 6983 if (!ops->ndo_set_mac_address)
1da177e4
LT
6984 return -EOPNOTSUPP;
6985 if (sa->sa_family != dev->type)
6986 return -EINVAL;
6987 if (!netif_device_present(dev))
6988 return -ENODEV;
d314774c 6989 err = ops->ndo_set_mac_address(dev, sa);
f6521516
JP
6990 if (err)
6991 return err;
fbdeca2d 6992 dev->addr_assign_type = NET_ADDR_SET;
f6521516 6993 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7bf23575 6994 add_device_randomness(dev->dev_addr, dev->addr_len);
f6521516 6995 return 0;
1da177e4 6996}
d1b19dff 6997EXPORT_SYMBOL(dev_set_mac_address);
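/* [Editorial sketch] dev_set_mac_address() expects a sockaddr whose
 * sa_family matches dev->type (e.g. ARPHRD_ETHER for Ethernet). RTNL held;
 * foo_set_mac() is hypothetical.
 */
static int foo_set_mac(struct net_device *dev, const u8 *addr)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, addr, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
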
1da177e4 6998
4bf84c35
JP
6999/**
7000 * dev_change_carrier - Change device carrier
7001 * @dev: device
691b3b7e 7002 * @new_carrier: new value
4bf84c35
JP
7003 *
7004 * Change device carrier
7005 */
7006int dev_change_carrier(struct net_device *dev, bool new_carrier)
7007{
7008 const struct net_device_ops *ops = dev->netdev_ops;
7009
7010 if (!ops->ndo_change_carrier)
7011 return -EOPNOTSUPP;
7012 if (!netif_device_present(dev))
7013 return -ENODEV;
7014 return ops->ndo_change_carrier(dev, new_carrier);
7015}
7016EXPORT_SYMBOL(dev_change_carrier);
7017
66b52b0d
JP
7018/**
7019 * dev_get_phys_port_id - Get device physical port ID
7020 * @dev: device
7021 * @ppid: port ID
7022 *
7023 * Get device physical port ID
7024 */
7025int dev_get_phys_port_id(struct net_device *dev,
02637fce 7026 struct netdev_phys_item_id *ppid)
66b52b0d
JP
7027{
7028 const struct net_device_ops *ops = dev->netdev_ops;
7029
7030 if (!ops->ndo_get_phys_port_id)
7031 return -EOPNOTSUPP;
7032 return ops->ndo_get_phys_port_id(dev, ppid);
7033}
7034EXPORT_SYMBOL(dev_get_phys_port_id);
7035
db24a904
DA
7036/**
7037 * dev_get_phys_port_name - Get device physical port name
7038 * @dev: device
7039 * @name: port name
ed49e650 7040 * @len: limit of bytes to copy to name
db24a904
DA
7041 *
7042 * Get device physical port name
7043 */
7044int dev_get_phys_port_name(struct net_device *dev,
7045 char *name, size_t len)
7046{
7047 const struct net_device_ops *ops = dev->netdev_ops;
7048
7049 if (!ops->ndo_get_phys_port_name)
7050 return -EOPNOTSUPP;
7051 return ops->ndo_get_phys_port_name(dev, name, len);
7052}
7053EXPORT_SYMBOL(dev_get_phys_port_name);
7054
d746d707
AK
7055/**
7056 * dev_change_proto_down - update protocol port state information
7057 * @dev: device
7058 * @proto_down: new value
7059 *
7060 * This info can be used by switch drivers to set the phys state of the
7061 * port.
7062 */
7063int dev_change_proto_down(struct net_device *dev, bool proto_down)
7064{
7065 const struct net_device_ops *ops = dev->netdev_ops;
7066
7067 if (!ops->ndo_change_proto_down)
7068 return -EOPNOTSUPP;
7069 if (!netif_device_present(dev))
7070 return -ENODEV;
7071 return ops->ndo_change_proto_down(dev, proto_down);
7072}
7073EXPORT_SYMBOL(dev_change_proto_down);
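
/* Illustrative sketch (not part of dev.c): a management path marking a
 * switch port protocol-down under RTNL; what triggers the call is an
 * assumption of the example.
 */
static int example_port_protocol_down(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_proto_down(dev, true);
	rtnl_unlock();
	return err;
}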
7074
f4e63525 7075u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
d67b9cd2 7076{
f4e63525 7077 struct netdev_bpf xdp;
d67b9cd2
DB
7078
7079 memset(&xdp, 0, sizeof(xdp));
7080 xdp.command = XDP_QUERY_PROG;
7081
7082 /* Query must always succeed. */
f4e63525 7083 WARN_ON(bpf_op(dev, &xdp) < 0);
58038695
MKL
7084 if (prog_id)
7085 *prog_id = xdp.prog_id;
7086
d67b9cd2
DB
7087 return xdp.prog_attached;
7088}
7089
f4e63525 7090static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
32d60277 7091 struct netlink_ext_ack *extack, u32 flags,
d67b9cd2
DB
7092 struct bpf_prog *prog)
7093{
f4e63525 7094 struct netdev_bpf xdp;
d67b9cd2
DB
7095
7096 memset(&xdp, 0, sizeof(xdp));
ee5d032f
JK
7097 if (flags & XDP_FLAGS_HW_MODE)
7098 xdp.command = XDP_SETUP_PROG_HW;
7099 else
7100 xdp.command = XDP_SETUP_PROG;
d67b9cd2 7101 xdp.extack = extack;
32d60277 7102 xdp.flags = flags;
d67b9cd2
DB
7103 xdp.prog = prog;
7104
f4e63525 7105 return bpf_op(dev, &xdp);
d67b9cd2
DB
7106}
7107
a7862b45
BB
7108/**
7109 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
7110 * @dev: device
b5d60989 7111 * @extack: netlink extended ack
a7862b45 7112 * @fd: new program fd or negative value to clear
85de8576 7113 * @flags: xdp-related flags
a7862b45
BB
7114 *
7115 * Set or clear a bpf program for a device
7116 */
ddf9f970
JK
7117int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7118 int fd, u32 flags)
a7862b45
BB
7119{
7120 const struct net_device_ops *ops = dev->netdev_ops;
7121 struct bpf_prog *prog = NULL;
f4e63525 7122 bpf_op_t bpf_op, bpf_chk;
a7862b45
BB
7123 int err;
7124
85de8576
DB
7125 ASSERT_RTNL();
7126
f4e63525
JK
7127 bpf_op = bpf_chk = ops->ndo_bpf;
7128 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
0489df9a 7129 return -EOPNOTSUPP;
f4e63525
JK
7130 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
7131 bpf_op = generic_xdp_install;
7132 if (bpf_op == bpf_chk)
7133 bpf_chk = generic_xdp_install;
b5cdae32 7134
a7862b45 7135 if (fd >= 0) {
f4e63525 7136 if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
d67b9cd2
DB
7137 return -EEXIST;
7138 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
f4e63525 7139 __dev_xdp_attached(dev, bpf_op, NULL))
d67b9cd2 7140 return -EBUSY;
85de8576 7141
248f346f
JK
7142 if (bpf_op == ops->ndo_bpf)
7143 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
7144 dev);
7145 else
7146 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
a7862b45
BB
7147 if (IS_ERR(prog))
7148 return PTR_ERR(prog);
7149 }
7150
f4e63525 7151 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
a7862b45
BB
7152 if (err < 0 && prog)
7153 bpf_prog_put(prog);
7154
7155 return err;
7156}
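
/* Illustrative sketch (not part of dev.c): attaching an XDP program by
 * fd, the same path the rtnetlink IFLA_XDP handler takes. The fd is
 * assumed to refer to a loaded BPF_PROG_TYPE_XDP program; a negative fd
 * would detach instead.
 */
#include <linux/if_link.h>

static int example_attach_xdp(struct net_device *dev, int prog_fd)
{
	int err;

	rtnl_lock();
	/* prefer native mode, don't silently replace an existing program */
	err = dev_change_xdp_fd(dev, NULL, prog_fd,
				XDP_FLAGS_DRV_MODE | XDP_FLAGS_UPDATE_IF_NOEXIST);
	rtnl_unlock();
	return err;
}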
a7862b45 7157
1da177e4
LT
7158/**
7159 * dev_new_index - allocate an ifindex
c4ea43c5 7160 * @net: the applicable net namespace
1da177e4
LT
7161 *
7162 * Returns a suitable unique value for a new device interface
7163 * number. The caller must hold the rtnl semaphore or the
7164 * dev_base_lock to be sure it remains unique.
7165 */
881d966b 7166static int dev_new_index(struct net *net)
1da177e4 7167{
aa79e66e 7168 int ifindex = net->ifindex;
f4563a75 7169
1da177e4
LT
7170 for (;;) {
7171 if (++ifindex <= 0)
7172 ifindex = 1;
881d966b 7173 if (!__dev_get_by_index(net, ifindex))
aa79e66e 7174 return net->ifindex = ifindex;
1da177e4
LT
7175 }
7176}
7177
1da177e4 7178/* Delayed registration/unregisteration */
3b5b34fd 7179static LIST_HEAD(net_todo_list);
200b916f 7180DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
1da177e4 7181
6f05f629 7182static void net_set_todo(struct net_device *dev)
1da177e4 7183{
1da177e4 7184 list_add_tail(&dev->todo_list, &net_todo_list);
50624c93 7185 dev_net(dev)->dev_unreg_count++;
1da177e4
LT
7186}
7187
9b5e383c 7188static void rollback_registered_many(struct list_head *head)
93ee31f1 7189{
e93737b0 7190 struct net_device *dev, *tmp;
5cde2829 7191 LIST_HEAD(close_head);
9b5e383c 7192
93ee31f1
DL
7193 BUG_ON(dev_boot_phase);
7194 ASSERT_RTNL();
7195
e93737b0 7196 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 7197 /* Some devices call without registering
e93737b0
KK
7198 * for initialization unwind. Remove those
7199 * devices and proceed with the remaining.
9b5e383c
ED
7200 */
7201 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
7202 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7203 dev->name, dev);
93ee31f1 7204
9b5e383c 7205 WARN_ON(1);
e93737b0
KK
7206 list_del(&dev->unreg_list);
7207 continue;
9b5e383c 7208 }
449f4544 7209 dev->dismantle = true;
9b5e383c 7210 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 7211 }
93ee31f1 7212
44345724 7213 /* If device is running, close it first. */
5cde2829
EB
7214 list_for_each_entry(dev, head, unreg_list)
7215 list_add_tail(&dev->close_list, &close_head);
99c4a26a 7216 dev_close_many(&close_head, true);
93ee31f1 7217
44345724 7218 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
7219 /* And unlink it from device chain. */
7220 unlist_netdevice(dev);
93ee31f1 7221
9b5e383c
ED
7222 dev->reg_state = NETREG_UNREGISTERING;
7223 }
41852497 7224 flush_all_backlogs();
93ee31f1
DL
7225
7226 synchronize_net();
7227
9b5e383c 7228 list_for_each_entry(dev, head, unreg_list) {
395eea6c
MB
7229 struct sk_buff *skb = NULL;
7230
9b5e383c
ED
7231 /* Shutdown queueing discipline. */
7232 dev_shutdown(dev);
93ee31f1
DL
7233
7234
9b5e383c 7235 /* Notify protocols that we are about to destroy
eb13da1a 7236 * this device. They should clean all the things.
7237 */
9b5e383c 7238 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 7239
395eea6c
MB
7240 if (!dev->rtnl_link_ops ||
7241 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
3d3ea5af 7242 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
6621dd29 7243 GFP_KERNEL, NULL);
395eea6c 7244
9b5e383c
ED
7245 /*
7246 * Flush the unicast and multicast chains
7247 */
a748ee24 7248 dev_uc_flush(dev);
22bedad3 7249 dev_mc_flush(dev);
93ee31f1 7250
9b5e383c
ED
7251 if (dev->netdev_ops->ndo_uninit)
7252 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 7253
395eea6c
MB
7254 if (skb)
7255 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
56bfa7ee 7256
9ff162a8
JP
7257 /* Notifier chain MUST detach us all upper devices. */
7258 WARN_ON(netdev_has_any_upper_dev(dev));
0f524a80 7259 WARN_ON(netdev_has_any_lower_dev(dev));
93ee31f1 7260
9b5e383c
ED
7261 /* Remove entries from kobject tree */
7262 netdev_unregister_kobject(dev);
024e9679
AD
7263#ifdef CONFIG_XPS
7264 /* Remove XPS queueing entries */
7265 netif_reset_xps_queues_gt(dev, 0);
7266#endif
9b5e383c 7267 }
93ee31f1 7268
850a545b 7269 synchronize_net();
395264d5 7270
a5ee1551 7271 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
7272 dev_put(dev);
7273}
7274
7275static void rollback_registered(struct net_device *dev)
7276{
7277 LIST_HEAD(single);
7278
7279 list_add(&dev->unreg_list, &single);
7280 rollback_registered_many(&single);
ceaaec98 7281 list_del(&single);
93ee31f1
DL
7282}
7283
fd867d51
JW
7284static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7285 struct net_device *upper, netdev_features_t features)
7286{
7287 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7288 netdev_features_t feature;
5ba3f7d6 7289 int feature_bit;
fd867d51 7290
5ba3f7d6
JW
7291 for_each_netdev_feature(&upper_disables, feature_bit) {
7292 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
7293 if (!(upper->wanted_features & feature)
7294 && (features & feature)) {
7295 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7296 &feature, upper->name);
7297 features &= ~feature;
7298 }
7299 }
7300
7301 return features;
7302}
7303
7304static void netdev_sync_lower_features(struct net_device *upper,
7305 struct net_device *lower, netdev_features_t features)
7306{
7307 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7308 netdev_features_t feature;
5ba3f7d6 7309 int feature_bit;
fd867d51 7310
5ba3f7d6
JW
7311 for_each_netdev_feature(&upper_disables, feature_bit) {
7312 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
7313 if (!(features & feature) && (lower->features & feature)) {
7314 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7315 &feature, lower->name);
7316 lower->wanted_features &= ~feature;
7317 netdev_update_features(lower);
7318
7319 if (unlikely(lower->features & feature))
7320 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7321 &feature, lower->name);
7322 }
7323 }
7324}
7325
c8f44aff
MM
7326static netdev_features_t netdev_fix_features(struct net_device *dev,
7327 netdev_features_t features)
b63365a2 7328{
57422dc5
MM
7329 /* Fix illegal checksum combinations */
7330 if ((features & NETIF_F_HW_CSUM) &&
7331 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 7332 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
7333 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7334 }
7335
b63365a2 7336 /* TSO requires that SG is present as well. */
ea2d3688 7337 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 7338 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 7339 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
7340 }
7341
ec5f0615
PS
7342 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7343 !(features & NETIF_F_IP_CSUM)) {
7344 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7345 features &= ~NETIF_F_TSO;
7346 features &= ~NETIF_F_TSO_ECN;
7347 }
7348
7349 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7350 !(features & NETIF_F_IPV6_CSUM)) {
7351 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7352 features &= ~NETIF_F_TSO6;
7353 }
7354
b1dc497b
AD
7355 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7356 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7357 features &= ~NETIF_F_TSO_MANGLEID;
7358
31d8b9e0
BH
7359 /* TSO ECN requires that TSO is present as well. */
7360 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7361 features &= ~NETIF_F_TSO_ECN;
7362
212b573f
MM
7363 /* Software GSO depends on SG. */
7364 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 7365 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
7366 features &= ~NETIF_F_GSO;
7367 }
7368
802ab55a
AD
7369 /* GSO partial features require GSO partial be set */
7370 if ((features & dev->gso_partial_features) &&
7371 !(features & NETIF_F_GSO_PARTIAL)) {
7372 netdev_dbg(dev,
7373 "Dropping partially supported GSO features since no GSO partial.\n");
7374 features &= ~dev->gso_partial_features;
7375 }
7376
b63365a2
HX
7377 return features;
7378}
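
/* Illustrative sketch (not part of dev.c): a driver-side ndo_fix_features
 * hook expressing the same style of dependency netdev_fix_features()
 * enforces globally; the hypothetical device here can only segment when
 * scatter/gather is on.
 */
static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;	/* TSO depends on SG */
	return features;
}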
b63365a2 7379
6cb6a27c 7380int __netdev_update_features(struct net_device *dev)
5455c699 7381{
fd867d51 7382 struct net_device *upper, *lower;
c8f44aff 7383 netdev_features_t features;
fd867d51 7384 struct list_head *iter;
e7868a85 7385 int err = -1;
5455c699 7386
87267485
MM
7387 ASSERT_RTNL();
7388
5455c699
MM
7389 features = netdev_get_wanted_features(dev);
7390
7391 if (dev->netdev_ops->ndo_fix_features)
7392 features = dev->netdev_ops->ndo_fix_features(dev, features);
7393
7394 /* driver might be less strict about feature dependencies */
7395 features = netdev_fix_features(dev, features);
7396
fd867d51
JW
7397 /* some features can't be enabled if they're off on an upper device */
7398 netdev_for_each_upper_dev_rcu(dev, upper, iter)
7399 features = netdev_sync_upper_features(dev, upper, features);
7400
5455c699 7401 if (dev->features == features)
e7868a85 7402 goto sync_lower;
5455c699 7403
c8f44aff
MM
7404 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7405 &dev->features, &features);
5455c699
MM
7406
7407 if (dev->netdev_ops->ndo_set_features)
7408 err = dev->netdev_ops->ndo_set_features(dev, features);
5f8dc33e
NA
7409 else
7410 err = 0;
5455c699 7411
6cb6a27c 7412 if (unlikely(err < 0)) {
5455c699 7413 netdev_err(dev,
c8f44aff
MM
7414 "set_features() failed (%d); wanted %pNF, left %pNF\n",
7415 err, &features, &dev->features);
17b85d29
NA
7416 /* return non-0 since some features might have changed and
7417 * it's better to fire a spurious notification than miss it
7418 */
7419 return -1;
6cb6a27c
MM
7420 }
7421
e7868a85 7422sync_lower:
fd867d51
JW
7423 /* some features must be disabled on lower devices when disabled
7424 * on an upper device (think: bonding master or bridge)
7425 */
7426 netdev_for_each_lower_dev(dev, lower, iter)
7427 netdev_sync_lower_features(dev, lower, features);
7428
ae847f40
SD
7429 if (!err) {
7430 netdev_features_t diff = features ^ dev->features;
7431
7432 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
7433 /* udp_tunnel_{get,drop}_rx_info both need
7434 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
7435 * device, or they won't do anything.
7436 * Thus we need to update dev->features
7437 * *before* calling udp_tunnel_get_rx_info,
7438 * but *after* calling udp_tunnel_drop_rx_info.
7439 */
7440 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
7441 dev->features = features;
7442 udp_tunnel_get_rx_info(dev);
7443 } else {
7444 udp_tunnel_drop_rx_info(dev);
7445 }
7446 }
7447
6cb6a27c 7448 dev->features = features;
ae847f40 7449 }
6cb6a27c 7450
e7868a85 7451 return err < 0 ? 0 : 1;
6cb6a27c
MM
7452}
7453
afe12cc8
MM
7454/**
7455 * netdev_update_features - recalculate device features
7456 * @dev: the device to check
7457 *
7458 * Recalculate dev->features set and send notifications if it
7459 * has changed. Should be called after driver or hardware dependent
7460 * conditions might have changed that influence the features.
7461 */
6cb6a27c
MM
7462void netdev_update_features(struct net_device *dev)
7463{
7464 if (__netdev_update_features(dev))
7465 netdev_features_change(dev);
5455c699
MM
7466}
7467EXPORT_SYMBOL(netdev_update_features);
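
/* Illustrative sketch (not part of dev.c): re-evaluating features after a
 * hardware-dependent condition changed. The MTU threshold idea is an
 * assumption; ndo_change_mtu is a natural call site because it already
 * runs under RTNL, which netdev_update_features() asserts.
 */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	/* e.g. hardware that cannot checksum jumbo frames */
	netdev_update_features(dev);	/* recompute and notify if changed */
	return 0;
}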
7468
afe12cc8
MM
7469/**
7470 * netdev_change_features - recalculate device features
7471 * @dev: the device to check
7472 *
7473 * Recalculate dev->features set and send notifications even
7474 * if they have not changed. Should be called instead of
7475 * netdev_update_features() if also dev->vlan_features might
7476 * have changed to allow the changes to be propagated to stacked
7477 * VLAN devices.
7478 */
7479void netdev_change_features(struct net_device *dev)
7480{
7481 __netdev_update_features(dev);
7482 netdev_features_change(dev);
7483}
7484EXPORT_SYMBOL(netdev_change_features);
7485
fc4a7489
PM
7486/**
7487 * netif_stacked_transfer_operstate - transfer operstate
7488 * @rootdev: the root or lower level device to transfer state from
7489 * @dev: the device to transfer operstate to
7490 *
7491 * Transfer operational state from root to device. This is normally
7492 * called when a stacking relationship exists between the root
7493 * device and the device(a leaf device).
7494 */
7495void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7496 struct net_device *dev)
7497{
7498 if (rootdev->operstate == IF_OPER_DORMANT)
7499 netif_dormant_on(dev);
7500 else
7501 netif_dormant_off(dev);
7502
0575c86b
ZS
7503 if (netif_carrier_ok(rootdev))
7504 netif_carrier_on(dev);
7505 else
7506 netif_carrier_off(dev);
fc4a7489
PM
7507}
7508EXPORT_SYMBOL(netif_stacked_transfer_operstate);
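
/* Illustrative sketch (not part of dev.c): a stacking driver (VLAN,
 * macvlan, ...) propagating the lower device's operstate from its
 * NETDEV_CHANGE notifier; example_lookup_upper() is a hypothetical
 * helper standing in for the driver's own lower-to-upper mapping.
 */
#include <linux/notifier.h>

static struct net_device *example_lookup_upper(struct net_device *lower);

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper = example_lookup_upper(lower);

	if (event == NETDEV_CHANGE && upper)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}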
7509
a953be53 7510#ifdef CONFIG_SYSFS
1b4bf461
ED
7511static int netif_alloc_rx_queues(struct net_device *dev)
7512{
1b4bf461 7513 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 7514 struct netdev_rx_queue *rx;
10595902 7515 size_t sz = count * sizeof(*rx);
1b4bf461 7516
bd25fa7b 7517 BUG_ON(count < 1);
1b4bf461 7518
dcda9b04 7519 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
da6bc57a
MH
7520 if (!rx)
7521 return -ENOMEM;
7522
bd25fa7b
TH
7523 dev->_rx = rx;
7524
bd25fa7b 7525 for (i = 0; i < count; i++)
fe822240 7526 rx[i].dev = dev;
1b4bf461
ED
7527 return 0;
7528}
bf264145 7529#endif
1b4bf461 7530
aa942104
CG
7531static void netdev_init_one_queue(struct net_device *dev,
7532 struct netdev_queue *queue, void *_unused)
7533{
7534 /* Initialize queue lock */
7535 spin_lock_init(&queue->_xmit_lock);
7536 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7537 queue->xmit_lock_owner = -1;
b236da69 7538 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 7539 queue->dev = dev;
114cf580
TH
7540#ifdef CONFIG_BQL
7541 dql_init(&queue->dql, HZ);
7542#endif
aa942104
CG
7543}
7544
60877a32
ED
7545static void netif_free_tx_queues(struct net_device *dev)
7546{
4cb28970 7547 kvfree(dev->_tx);
60877a32
ED
7548}
7549
e6484930
TH
7550static int netif_alloc_netdev_queues(struct net_device *dev)
7551{
7552 unsigned int count = dev->num_tx_queues;
7553 struct netdev_queue *tx;
60877a32 7554 size_t sz = count * sizeof(*tx);
e6484930 7555
d339727c
ED
7556 if (count < 1 || count > 0xffff)
7557 return -EINVAL;
62b5942a 7558
dcda9b04 7559 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
da6bc57a
MH
7560 if (!tx)
7561 return -ENOMEM;
7562
e6484930 7563 dev->_tx = tx;
1d24eb48 7564
e6484930
TH
7565 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7566 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
7567
7568 return 0;
e6484930
TH
7569}
7570
a2029240
DV
7571void netif_tx_stop_all_queues(struct net_device *dev)
7572{
7573 unsigned int i;
7574
7575 for (i = 0; i < dev->num_tx_queues; i++) {
7576 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
f4563a75 7577
a2029240
DV
7578 netif_tx_stop_queue(txq);
7579 }
7580}
7581EXPORT_SYMBOL(netif_tx_stop_all_queues);
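
/* Illustrative sketch (not part of dev.c): quiescing transmit in a
 * driver's ndo_stop before tearing the hardware down; the private
 * structure with its single NAPI context is an assumption.
 */
struct example_priv {
	struct napi_struct napi;
};

static int example_ndo_stop(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);	/* no further xmit on any queue */
	napi_disable(&priv->napi);
	return 0;
}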
7582
1da177e4
LT
7583/**
7584 * register_netdevice - register a network device
7585 * @dev: device to register
7586 *
7587 * Take a completed network device structure and add it to the kernel
7588 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7589 * chain. 0 is returned on success. A negative errno code is returned
7590 * on a failure to set up the device, or if the name is a duplicate.
7591 *
7592 * Callers must hold the rtnl semaphore. You may want
7593 * register_netdev() instead of this.
7594 *
7595 * BUGS:
7596 * The locking appears insufficient to guarantee two parallel registers
7597 * will not get the same name.
7598 */
7599
7600int register_netdevice(struct net_device *dev)
7601{
1da177e4 7602 int ret;
d314774c 7603 struct net *net = dev_net(dev);
1da177e4
LT
7604
7605 BUG_ON(dev_boot_phase);
7606 ASSERT_RTNL();
7607
b17a7c17
SH
7608 might_sleep();
7609
1da177e4
LT
7610 /* When net_device's are persistent, this will be fatal. */
7611 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 7612 BUG_ON(!net);
1da177e4 7613
f1f28aa3 7614 spin_lock_init(&dev->addr_list_lock);
cf508b12 7615 netdev_set_addr_lockdep_class(dev);
1da177e4 7616
828de4f6 7617 ret = dev_get_valid_name(net, dev, dev->name);
0696c3a8
PP
7618 if (ret < 0)
7619 goto out;
7620
1da177e4 7621 /* Init, if this function is available */
d314774c
SH
7622 if (dev->netdev_ops->ndo_init) {
7623 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
7624 if (ret) {
7625 if (ret > 0)
7626 ret = -EIO;
90833aa4 7627 goto out;
1da177e4
LT
7628 }
7629 }
4ec93edb 7630
f646968f
PM
7631 if (((dev->hw_features | dev->features) &
7632 NETIF_F_HW_VLAN_CTAG_FILTER) &&
d2ed273d
MM
7633 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7634 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7635 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7636 ret = -EINVAL;
7637 goto err_uninit;
7638 }
7639
9c7dafbf
PE
7640 ret = -EBUSY;
7641 if (!dev->ifindex)
7642 dev->ifindex = dev_new_index(net);
7643 else if (__dev_get_by_index(net, dev->ifindex))
7644 goto err_uninit;
7645
5455c699
MM
7646 /* Transfer changeable features to wanted_features and enable
7647 * software offloads (GSO and GRO).
7648 */
7649 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f 7650 dev->features |= NETIF_F_SOFT_FEATURES;
d764a122
SD
7651
7652 if (dev->netdev_ops->ndo_udp_tunnel_add) {
7653 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7654 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7655 }
7656
14d1232f 7657 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 7658
cbc53e08 7659 if (!(dev->flags & IFF_LOOPBACK))
34324dc2 7660 dev->hw_features |= NETIF_F_NOCACHE_COPY;
cbc53e08 7661
7f348a60
AD
7662 /* If IPv4 TCP segmentation offload is supported we should also
7663 * allow the device to enable segmenting the frame with the option
7664 * of ignoring a static IP ID value. This doesn't enable the
7665 * feature itself but allows the user to enable it later.
7666 */
cbc53e08
AD
7667 if (dev->hw_features & NETIF_F_TSO)
7668 dev->hw_features |= NETIF_F_TSO_MANGLEID;
7f348a60
AD
7669 if (dev->vlan_features & NETIF_F_TSO)
7670 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7671 if (dev->mpls_features & NETIF_F_TSO)
7672 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7673 if (dev->hw_enc_features & NETIF_F_TSO)
7674 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
c6e1a0d1 7675
1180e7d6 7676 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 7677 */
1180e7d6 7678 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 7679
ee579677
PS
7680 /* Make NETIF_F_SG inheritable to tunnel devices.
7681 */
802ab55a 7682 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
ee579677 7683
0d89d203
SH
7684 /* Make NETIF_F_SG inheritable to MPLS.
7685 */
7686 dev->mpls_features |= NETIF_F_SG;
7687
7ffbe3fd
JB
7688 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7689 ret = notifier_to_errno(ret);
7690 if (ret)
7691 goto err_uninit;
7692
8b41d188 7693 ret = netdev_register_kobject(dev);
b17a7c17 7694 if (ret)
7ce1b0ed 7695 goto err_uninit;
b17a7c17
SH
7696 dev->reg_state = NETREG_REGISTERED;
7697
6cb6a27c 7698 __netdev_update_features(dev);
8e9b59b2 7699
1da177e4
LT
7700 /*
7701 * Default initial state at registry is that the
7702 * device is present.
7703 */
7704
7705 set_bit(__LINK_STATE_PRESENT, &dev->state);
7706
8f4cccbb
BH
7707 linkwatch_init_dev(dev);
7708
1da177e4 7709 dev_init_scheduler(dev);
1da177e4 7710 dev_hold(dev);
ce286d32 7711 list_netdevice(dev);
7bf23575 7712 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 7713
948b337e
JP
7714 /* If the device has permanent device address, driver should
7715 * set dev_addr and also addr_assign_type should be set to
7716 * NET_ADDR_PERM (default value).
7717 */
7718 if (dev->addr_assign_type == NET_ADDR_PERM)
7719 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7720
1da177e4 7721 /* Notify protocols that a new device appeared. */
056925ab 7722 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 7723 ret = notifier_to_errno(ret);
93ee31f1
DL
7724 if (ret) {
7725 rollback_registered(dev);
7726 dev->reg_state = NETREG_UNREGISTERED;
7727 }
d90a909e
EB
7728 /*
7729 * Prevent userspace races by waiting until the network
7730 * device is fully setup before sending notifications.
7731 */
a2835763
PM
7732 if (!dev->rtnl_link_ops ||
7733 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 7734 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
1da177e4
LT
7735
7736out:
7737 return ret;
7ce1b0ed
HX
7738
7739err_uninit:
d314774c
SH
7740 if (dev->netdev_ops->ndo_uninit)
7741 dev->netdev_ops->ndo_uninit(dev);
cf124db5
DM
7742 if (dev->priv_destructor)
7743 dev->priv_destructor(dev);
7ce1b0ed 7744 goto out;
1da177e4 7745}
d1b19dff 7746EXPORT_SYMBOL(register_netdevice);
1da177e4 7747
937f1ba5
BH
7748/**
7749 * init_dummy_netdev - init a dummy network device for NAPI
7750 * @dev: device to init
7751 *
7752 * This takes a network device structure and initializes the minimum
7753 * number of fields so it can be used to schedule NAPI polls without
7754 * registering a full blown interface. This is to be used by drivers
7755 * that need to tie several hardware interfaces to a single NAPI
7756 * poll scheduler due to HW limitations.
7757 */
7758int init_dummy_netdev(struct net_device *dev)
7759{
7760 /* Clear everything. Note we don't initialize spinlocks
7761 * as they aren't supposed to be taken by any of the
7762 * NAPI code and this dummy netdev is supposed to be
7763 * only ever used for NAPI polls
7764 */
7765 memset(dev, 0, sizeof(struct net_device));
7766
7767 /* make sure we BUG if trying to hit standard
7768 * register/unregister code path
7769 */
7770 dev->reg_state = NETREG_DUMMY;
7771
937f1ba5
BH
7772 /* NAPI wants this */
7773 INIT_LIST_HEAD(&dev->napi_list);
7774
7775 /* a dummy interface is started by default */
7776 set_bit(__LINK_STATE_PRESENT, &dev->state);
7777 set_bit(__LINK_STATE_START, &dev->state);
7778
29b4433d
ED
7779 /* Note : We don't allocate pcpu_refcnt for dummy devices,
7780 * because users of this 'device' don't need to change
7781 * its refcount.
7782 */
7783
937f1ba5
BH
7784 return 0;
7785}
7786EXPORT_SYMBOL_GPL(init_dummy_netdev);
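
/* Illustrative sketch (not part of dev.c): the intended use of
 * init_dummy_netdev() - one never-registered netdev backing NAPI for a
 * device with several hardware interfaces. The poll routine and the
 * weight of 64 are assumptions of the example.
 */
struct example_hw {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* ... process up to @budget packets here ... */
	napi_complete(napi);
	return 0;
}

static void example_hw_init(struct example_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
	napi_enable(&hw->napi);
}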
7787
7788
1da177e4
LT
7789/**
7790 * register_netdev - register a network device
7791 * @dev: device to register
7792 *
7793 * Take a completed network device structure and add it to the kernel
7794 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7795 * chain. 0 is returned on success. A negative errno code is returned
7796 * on a failure to set up the device, or if the name is a duplicate.
7797 *
38b4da38 7798 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
7799 * and expands the device name if you passed a format string to
7800 * alloc_netdev.
7801 */
7802int register_netdev(struct net_device *dev)
7803{
7804 int err;
7805
7806 rtnl_lock();
1da177e4 7807 err = register_netdevice(dev);
1da177e4
LT
7808 rtnl_unlock();
7809 return err;
7810}
7811EXPORT_SYMBOL(register_netdev);
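
/* Illustrative sketch (not part of dev.c): the classic probe-time pattern
 * around register_netdev(). alloc_etherdev() requests the "eth%d" name
 * format, which register_netdev() expands; example_netdev_ops is a
 * hypothetical ops table a real driver would populate.
 */
#include <linux/etherdevice.h>

static const struct net_device_ops example_netdev_ops;	/* hypothetical */

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);	/* no private data in this sketch */
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;

	err = register_netdev(dev);	/* takes rtnl, expands "eth%d" */
	if (err)
		free_netdev(dev);	/* safe on registration failure */
	return err;
}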
7812
29b4433d
ED
7813int netdev_refcnt_read(const struct net_device *dev)
7814{
7815 int i, refcnt = 0;
7816
7817 for_each_possible_cpu(i)
7818 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7819 return refcnt;
7820}
7821EXPORT_SYMBOL(netdev_refcnt_read);
7822
2c53040f 7823/**
1da177e4 7824 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 7825 * @dev: target net_device
1da177e4
LT
7826 *
7827 * This is called when unregistering network devices.
7828 *
7829 * Any protocol or device that holds a reference should register
7830 * for netdevice notification, and clean up and put back the
7831 * reference if they receive an UNREGISTER event.
7832 * We can get stuck here if buggy protocols don't correctly
4ec93edb 7833 * call dev_put.
1da177e4
LT
7834 */
7835static void netdev_wait_allrefs(struct net_device *dev)
7836{
7837 unsigned long rebroadcast_time, warning_time;
29b4433d 7838 int refcnt;
1da177e4 7839
e014debe
ED
7840 linkwatch_forget_dev(dev);
7841
1da177e4 7842 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
7843 refcnt = netdev_refcnt_read(dev);
7844
7845 while (refcnt != 0) {
1da177e4 7846 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 7847 rtnl_lock();
1da177e4
LT
7848
7849 /* Rebroadcast unregister notification */
056925ab 7850 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 7851
748e2d93 7852 __rtnl_unlock();
0115e8e3 7853 rcu_barrier();
748e2d93
ED
7854 rtnl_lock();
7855
0115e8e3 7856 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
1da177e4
LT
7857 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7858 &dev->state)) {
7859 /* We must not have linkwatch events
7860 * pending on unregister. If this
7861 * happens, we simply run the queue
7862 * unscheduled, resulting in a noop
7863 * for this device.
7864 */
7865 linkwatch_run_queue();
7866 }
7867
6756ae4b 7868 __rtnl_unlock();
1da177e4
LT
7869
7870 rebroadcast_time = jiffies;
7871 }
7872
7873 msleep(250);
7874
29b4433d
ED
7875 refcnt = netdev_refcnt_read(dev);
7876
1da177e4 7877 if (time_after(jiffies, warning_time + 10 * HZ)) {
7b6cd1ce
JP
7878 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7879 dev->name, refcnt);
1da177e4
LT
7880 warning_time = jiffies;
7881 }
7882 }
7883}
7884
7885/* The sequence is:
7886 *
7887 * rtnl_lock();
7888 * ...
7889 * register_netdevice(x1);
7890 * register_netdevice(x2);
7891 * ...
7892 * unregister_netdevice(y1);
7893 * unregister_netdevice(y2);
7894 * ...
7895 * rtnl_unlock();
7896 * free_netdev(y1);
7897 * free_netdev(y2);
7898 *
58ec3b4d 7899 * We are invoked by rtnl_unlock().
1da177e4 7900 * This allows us to deal with problems:
b17a7c17 7901 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
7902 * without deadlocking with linkwatch via keventd.
7903 * 2) Since we run with the RTNL semaphore not held, we can sleep
7904 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
7905 *
7906 * We must not return until all unregister events added during
7907 * the interval the lock was held have been completed.
1da177e4 7908 */
1da177e4
LT
7909void netdev_run_todo(void)
7910{
626ab0e6 7911 struct list_head list;
1da177e4 7912
1da177e4 7913 /* Snapshot list, allow later requests */
626ab0e6 7914 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
7915
7916 __rtnl_unlock();
626ab0e6 7917
0115e8e3
ED
7918
7919 /* Wait for rcu callbacks to finish before next phase */
850a545b
EB
7920 if (!list_empty(&list))
7921 rcu_barrier();
7922
1da177e4
LT
7923 while (!list_empty(&list)) {
7924 struct net_device *dev
e5e26d75 7925 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
7926 list_del(&dev->todo_list);
7927
748e2d93 7928 rtnl_lock();
0115e8e3 7929 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
748e2d93 7930 __rtnl_unlock();
0115e8e3 7931
b17a7c17 7932 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 7933 pr_err("network todo '%s' but state %d\n",
b17a7c17
SH
7934 dev->name, dev->reg_state);
7935 dump_stack();
7936 continue;
7937 }
1da177e4 7938
b17a7c17 7939 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 7940
b17a7c17 7941 netdev_wait_allrefs(dev);
1da177e4 7942
b17a7c17 7943 /* paranoia */
29b4433d 7944 BUG_ON(netdev_refcnt_read(dev));
7866a621
SN
7945 BUG_ON(!list_empty(&dev->ptype_all));
7946 BUG_ON(!list_empty(&dev->ptype_specific));
33d480ce
ED
7947 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7948 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
547b792c 7949 WARN_ON(dev->dn_ptr);
1da177e4 7950
cf124db5
DM
7951 if (dev->priv_destructor)
7952 dev->priv_destructor(dev);
7953 if (dev->needs_free_netdev)
7954 free_netdev(dev);
9093bbb2 7955
50624c93
EB
7956 /* Report a network device has been unregistered */
7957 rtnl_lock();
7958 dev_net(dev)->dev_unreg_count--;
7959 __rtnl_unlock();
7960 wake_up(&netdev_unregistering_wq);
7961
9093bbb2
SH
7962 /* Free network device */
7963 kobject_put(&dev->dev.kobj);
1da177e4 7964 }
1da177e4
LT
7965}
7966
9256645a
JW
7967/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7968 * all the same fields in the same order as net_device_stats, with only
7969 * the type differing, but rtnl_link_stats64 may have additional fields
7970 * at the end for newer counters.
3cfde79c 7971 */
77a1abf5
ED
7972void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7973 const struct net_device_stats *netdev_stats)
3cfde79c
BH
7974{
7975#if BITS_PER_LONG == 64
9256645a 7976 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
9af9959e 7977 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
9256645a
JW
7978 /* zero out counters that only exist in rtnl_link_stats64 */
7979 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7980 sizeof(*stats64) - sizeof(*netdev_stats));
3cfde79c 7981#else
9256645a 7982 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
3cfde79c
BH
7983 const unsigned long *src = (const unsigned long *)netdev_stats;
7984 u64 *dst = (u64 *)stats64;
7985
9256645a 7986 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
3cfde79c
BH
7987 for (i = 0; i < n; i++)
7988 dst[i] = src[i];
9256645a
JW
7989 /* zero out counters that only exist in rtnl_link_stats64 */
7990 memset((char *)stats64 + n * sizeof(u64), 0,
7991 sizeof(*stats64) - n * sizeof(u64));
3cfde79c
BH
7992#endif
7993}
77a1abf5 7994EXPORT_SYMBOL(netdev_stats_to_stats64);
3cfde79c 7995
eeda3fd6
SH
7996/**
7997 * dev_get_stats - get network device statistics
7998 * @dev: device to get statistics from
28172739 7999 * @storage: place to store stats
eeda3fd6 8000 *
d7753516
BH
8001 * Get network statistics from device. Return @storage.
8002 * The device driver may provide its own method by setting
8003 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
8004 * otherwise the internal statistics structure is used.
eeda3fd6 8005 */
d7753516
BH
8006struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
8007 struct rtnl_link_stats64 *storage)
7004bf25 8008{
eeda3fd6
SH
8009 const struct net_device_ops *ops = dev->netdev_ops;
8010
28172739
ED
8011 if (ops->ndo_get_stats64) {
8012 memset(storage, 0, sizeof(*storage));
caf586e5
ED
8013 ops->ndo_get_stats64(dev, storage);
8014 } else if (ops->ndo_get_stats) {
3cfde79c 8015 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
8016 } else {
8017 netdev_stats_to_stats64(storage, &dev->stats);
28172739 8018 }
6f64ec74
ED
8019 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
8020 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
8021 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
28172739 8022 return storage;
c45d286e 8023}
eeda3fd6 8024EXPORT_SYMBOL(dev_get_stats);
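
/* Illustrative sketch (not part of dev.c): reading device statistics into
 * caller-owned storage with dev_get_stats(); callers typically hold RTNL
 * or otherwise pin the device, which this sketch assumes.
 */
static u64 example_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 storage;

	dev_get_stats(dev, &storage);
	return storage.rx_packets;
}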
c45d286e 8025
24824a09 8026struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 8027{
24824a09 8028 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 8029
24824a09
ED
8030#ifdef CONFIG_NET_CLS_ACT
8031 if (queue)
8032 return queue;
8033 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
8034 if (!queue)
8035 return NULL;
8036 netdev_init_one_queue(dev, queue, NULL);
2ce1ee17 8037 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
24824a09
ED
8038 queue->qdisc_sleeping = &noop_qdisc;
8039 rcu_assign_pointer(dev->ingress_queue, queue);
8040#endif
8041 return queue;
bb949fbd
DM
8042}
8043
2c60db03
ED
8044static const struct ethtool_ops default_ethtool_ops;
8045
d07d7507
SG
8046void netdev_set_default_ethtool_ops(struct net_device *dev,
8047 const struct ethtool_ops *ops)
8048{
8049 if (dev->ethtool_ops == &default_ethtool_ops)
8050 dev->ethtool_ops = ops;
8051}
8052EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
8053
74d332c1
ED
8054void netdev_freemem(struct net_device *dev)
8055{
8056 char *addr = (char *)dev - dev->padded;
8057
4cb28970 8058 kvfree(addr);
74d332c1
ED
8059}
8060
1da177e4 8061/**
722c9a0c 8062 * alloc_netdev_mqs - allocate network device
8063 * @sizeof_priv: size of private data to allocate space for
8064 * @name: device name format string
8065 * @name_assign_type: origin of device name
8066 * @setup: callback to initialize device
8067 * @txqs: the number of TX subqueues to allocate
8068 * @rxqs: the number of RX subqueues to allocate
8069 *
8070 * Allocates a struct net_device with private data area for driver use
8071 * and performs basic initialization. Also allocates subqueue structs
8072 * for each queue on the device.
1da177e4 8073 */
36909ea4 8074struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
c835a677 8075 unsigned char name_assign_type,
36909ea4
TH
8076 void (*setup)(struct net_device *),
8077 unsigned int txqs, unsigned int rxqs)
1da177e4 8078{
1da177e4 8079 struct net_device *dev;
52a59bd5 8080 unsigned int alloc_size;
1ce8e7b5 8081 struct net_device *p;
1da177e4 8082
b6fe17d6
SH
8083 BUG_ON(strlen(name) >= sizeof(dev->name));
8084
36909ea4 8085 if (txqs < 1) {
7b6cd1ce 8086 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
8087 return NULL;
8088 }
8089
a953be53 8090#ifdef CONFIG_SYSFS
36909ea4 8091 if (rxqs < 1) {
7b6cd1ce 8092 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
8093 return NULL;
8094 }
8095#endif
8096
fd2ea0a7 8097 alloc_size = sizeof(struct net_device);
d1643d24
AD
8098 if (sizeof_priv) {
8099 /* ensure 32-byte alignment of private area */
1ce8e7b5 8100 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
8101 alloc_size += sizeof_priv;
8102 }
8103 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 8104 alloc_size += NETDEV_ALIGN - 1;
1da177e4 8105
dcda9b04 8106 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
62b5942a 8107 if (!p)
1da177e4 8108 return NULL;
1da177e4 8109
1ce8e7b5 8110 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 8111 dev->padded = (char *)dev - (char *)p;
ab9c73cc 8112
29b4433d
ED
8113 dev->pcpu_refcnt = alloc_percpu(int);
8114 if (!dev->pcpu_refcnt)
74d332c1 8115 goto free_dev;
ab9c73cc 8116
ab9c73cc 8117 if (dev_addr_init(dev))
29b4433d 8118 goto free_pcpu;
ab9c73cc 8119
22bedad3 8120 dev_mc_init(dev);
a748ee24 8121 dev_uc_init(dev);
ccffad25 8122
c346dca1 8123 dev_net_set(dev, &init_net);
1da177e4 8124
8d3bdbd5 8125 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 8126 dev->gso_max_segs = GSO_MAX_SEGS;
8d3bdbd5 8127
8d3bdbd5
DM
8128 INIT_LIST_HEAD(&dev->napi_list);
8129 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 8130 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 8131 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
8132 INIT_LIST_HEAD(&dev->adj_list.upper);
8133 INIT_LIST_HEAD(&dev->adj_list.lower);
7866a621
SN
8134 INIT_LIST_HEAD(&dev->ptype_all);
8135 INIT_LIST_HEAD(&dev->ptype_specific);
59cc1f61
JK
8136#ifdef CONFIG_NET_SCHED
8137 hash_init(dev->qdisc_hash);
8138#endif
02875878 8139 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8d3bdbd5
DM
8140 setup(dev);
8141
a813104d 8142 if (!dev->tx_queue_len) {
f84bb1ea 8143 dev->priv_flags |= IFF_NO_QUEUE;
11597084 8144 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
a813104d 8145 }
906470c1 8146
36909ea4
TH
8147 dev->num_tx_queues = txqs;
8148 dev->real_num_tx_queues = txqs;
ed9af2e8 8149 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 8150 goto free_all;
e8a0464c 8151
a953be53 8152#ifdef CONFIG_SYSFS
36909ea4
TH
8153 dev->num_rx_queues = rxqs;
8154 dev->real_num_rx_queues = rxqs;
fe822240 8155 if (netif_alloc_rx_queues(dev))
8d3bdbd5 8156 goto free_all;
df334545 8157#endif
0a9627f2 8158
1da177e4 8159 strcpy(dev->name, name);
c835a677 8160 dev->name_assign_type = name_assign_type;
cbda10fa 8161 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
8162 if (!dev->ethtool_ops)
8163 dev->ethtool_ops = &default_ethtool_ops;
e687ad60
PN
8164
8165 nf_hook_ingress_init(dev);
8166
1da177e4 8167 return dev;
ab9c73cc 8168
8d3bdbd5
DM
8169free_all:
8170 free_netdev(dev);
8171 return NULL;
8172
29b4433d
ED
8173free_pcpu:
8174 free_percpu(dev->pcpu_refcnt);
74d332c1
ED
8175free_dev:
8176 netdev_freemem(dev);
ab9c73cc 8177 return NULL;
1da177e4 8178}
36909ea4 8179EXPORT_SYMBOL(alloc_netdev_mqs);
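
/* Illustrative sketch (not part of dev.c): allocating a multiqueue
 * Ethernet device with distinct TX/RX queue counts. The "exmp%d" name
 * format and the counts of 8 are assumptions of the example.
 */
#include <linux/etherdevice.h>

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);	/* Ethernet defaults for type, MTU, etc. */
}

static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(0, "exmp%d", NET_NAME_UNKNOWN,
				example_setup, 8, 8);	/* 8 TX, 8 RX */
}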
1da177e4
LT
8180
8181/**
722c9a0c 8182 * free_netdev - free network device
8183 * @dev: device
1da177e4 8184 *
722c9a0c 8185 * This function does the last stage of destroying an allocated device
8186 * interface. The reference to the device object is released. If this
8187 * is the last reference then it will be freed. Must be called in process
8188 * context.
1da177e4
LT
8189 */
8190void free_netdev(struct net_device *dev)
8191{
d565b0a1 8192 struct napi_struct *p, *n;
b5cdae32 8193 struct bpf_prog *prog;
d565b0a1 8194
93d05d4a 8195 might_sleep();
60877a32 8196 netif_free_tx_queues(dev);
a953be53 8197#ifdef CONFIG_SYSFS
10595902 8198 kvfree(dev->_rx);
fe822240 8199#endif
e8a0464c 8200
33d480ce 8201 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 8202
f001fde5
JP
8203 /* Flush device addresses */
8204 dev_addr_flush(dev);
8205
d565b0a1
HX
8206 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8207 netif_napi_del(p);
8208
29b4433d
ED
8209 free_percpu(dev->pcpu_refcnt);
8210 dev->pcpu_refcnt = NULL;
8211
b5cdae32
DM
8212 prog = rcu_dereference_protected(dev->xdp_prog, 1);
8213 if (prog) {
8214 bpf_prog_put(prog);
8215 static_key_slow_dec(&generic_xdp_needed);
8216 }
8217
3041a069 8218 /* Compatibility with error handling in drivers */
1da177e4 8219 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 8220 netdev_freemem(dev);
1da177e4
LT
8221 return;
8222 }
8223
8224 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8225 dev->reg_state = NETREG_RELEASED;
8226
43cb76d9
GKH
8227 /* will free via device release */
8228 put_device(&dev->dev);
1da177e4 8229}
d1b19dff 8230EXPORT_SYMBOL(free_netdev);
4ec93edb 8231
f0db275a
SH
8232/**
8233 * synchronize_net - Synchronize with packet receive processing
8234 *
8235 * Wait for packets currently being received to be done.
8236 * Does not block later packets from starting.
8237 */
4ec93edb 8238void synchronize_net(void)
1da177e4
LT
8239{
8240 might_sleep();
be3fc413
ED
8241 if (rtnl_is_locked())
8242 synchronize_rcu_expedited();
8243 else
8244 synchronize_rcu();
1da177e4 8245}
d1b19dff 8246EXPORT_SYMBOL(synchronize_net);
1da177e4
LT
8247
8248/**
44a0873d 8249 * unregister_netdevice_queue - remove device from the kernel
1da177e4 8250 * @dev: device
44a0873d 8251 * @head: list
6ebfbc06 8252 *
1da177e4 8253 * This function shuts down a device interface and removes it
d59b54b1 8254 * from the kernel tables.
44a0873d 8255 * If head not NULL, device is queued to be unregistered later.
1da177e4
LT
8256 *
8257 * Callers must hold the rtnl semaphore. You may want
8258 * unregister_netdev() instead of this.
8259 */
8260
44a0873d 8261void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 8262{
a6620712
HX
8263 ASSERT_RTNL();
8264
44a0873d 8265 if (head) {
9fdce099 8266 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
8267 } else {
8268 rollback_registered(dev);
8269 /* Finish processing unregister after unlock */
8270 net_set_todo(dev);
8271 }
1da177e4 8272}
44a0873d 8273EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 8274
9b5e383c
ED
8275/**
8276 * unregister_netdevice_many - unregister many devices
8277 * @head: list of devices
87757a91
ED
8278 *
8279 * Note: As most callers use a stack allocated list_head,
8280 * we force a list_del() to make sure the stack won't be corrupted later.
9b5e383c
ED
8281 */
8282void unregister_netdevice_many(struct list_head *head)
8283{
8284 struct net_device *dev;
8285
8286 if (!list_empty(head)) {
8287 rollback_registered_many(head);
8288 list_for_each_entry(dev, head, unreg_list)
8289 net_set_todo(dev);
87757a91 8290 list_del(head);
9b5e383c
ED
8291 }
8292}
63c8099d 8293EXPORT_SYMBOL(unregister_netdevice_many);
9b5e383c 8294
1da177e4
LT
8295/**
8296 * unregister_netdev - remove device from the kernel
8297 * @dev: device
8298 *
8299 * This function shuts down a device interface and removes it
d59b54b1 8300 * from the kernel tables.
1da177e4
LT
8301 *
8302 * This is just a wrapper for unregister_netdevice that takes
8303 * the rtnl semaphore. In general you want to use this and not
8304 * unregister_netdevice.
8305 */
8306void unregister_netdev(struct net_device *dev)
8307{
8308 rtnl_lock();
8309 unregister_netdevice(dev);
8310 rtnl_unlock();
8311}
1da177e4
LT
8312EXPORT_SYMBOL(unregister_netdev);
8313
ce286d32
EB
8314/**
8315 * dev_change_net_namespace - move device to a different network namespace
8316 * @dev: device
8317 * @net: network namespace
8318 * @pat: If not NULL name pattern to try if the current device name
8319 * is already taken in the destination network namespace.
8320 *
8321 * This function shuts down a device interface and moves it
8322 * to a new network namespace. On success 0 is returned, on
8323 * a failure a negative errno code is returned.
8324 *
8325 * Callers must hold the rtnl semaphore.
8326 */
8327
8328int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8329{
6621dd29 8330 int err, new_nsid;
ce286d32
EB
8331
8332 ASSERT_RTNL();
8333
8334 /* Don't allow namespace local devices to be moved. */
8335 err = -EINVAL;
8336 if (dev->features & NETIF_F_NETNS_LOCAL)
8337 goto out;
8338
8339 /* Ensure the device has been registered */
ce286d32
EB
8340 if (dev->reg_state != NETREG_REGISTERED)
8341 goto out;
8342
8343 /* Get out if there is nothing to do */
8344 err = 0;
878628fb 8345 if (net_eq(dev_net(dev), net))
ce286d32
EB
8346 goto out;
8347
8348 /* Pick the destination device name, and ensure
8349 * we can use it in the destination network namespace.
8350 */
8351 err = -EEXIST;
d9031024 8352 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
8353 /* We get here if we can't use the current device name */
8354 if (!pat)
8355 goto out;
828de4f6 8356 if (dev_get_valid_name(net, dev, pat) < 0)
ce286d32
EB
8357 goto out;
8358 }
8359
8360 /*
8361 * And now a mini version of register_netdevice unregister_netdevice.
8362 */
8363
8364 /* If device is running close it first. */
9b772652 8365 dev_close(dev);
ce286d32
EB
8366
8367 /* And unlink it from device chain */
8368 err = -ENODEV;
8369 unlist_netdevice(dev);
8370
8371 synchronize_net();
8372
8373 /* Shutdown queueing discipline. */
8374 dev_shutdown(dev);
8375
8376 /* Notify protocols that we are about to destroy
eb13da1a 8377 * this device. They should clean all the things.
8378 *
8379 * Note that dev->reg_state stays at NETREG_REGISTERED.
8380 * This is wanted because this way 8021q and macvlan know
8381 * the device is just moving and can keep their slaves up.
8382 */
ce286d32 8383 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43
G
8384 rcu_barrier();
8385 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6621dd29
ND
8386 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net)
8387 new_nsid = peernet2id_alloc(dev_net(dev), net);
8388 else
8389 new_nsid = peernet2id(dev_net(dev), net);
8390 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid);
ce286d32
EB
8391
8392 /*
8393 * Flush the unicast and multicast chains
8394 */
a748ee24 8395 dev_uc_flush(dev);
22bedad3 8396 dev_mc_flush(dev);
ce286d32 8397
4e66ae2e
SH
8398 /* Send a netdev-removed uevent to the old namespace */
8399 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
4c75431a 8400 netdev_adjacent_del_links(dev);
4e66ae2e 8401
ce286d32 8402 /* Actually switch the network namespace */
c346dca1 8403 dev_net_set(dev, net);
ce286d32 8404
ce286d32 8405 /* If there is an ifindex conflict assign a new one */
7a66bbc9 8406 if (__dev_get_by_index(net, dev->ifindex))
ce286d32 8407 dev->ifindex = dev_new_index(net);
ce286d32 8408
4e66ae2e
SH
8409 /* Send a netdev-add uevent to the new namespace */
8410 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
4c75431a 8411 netdev_adjacent_add_links(dev);
4e66ae2e 8412
8b41d188 8413 /* Fixup kobjects */
a1b3f594 8414 err = device_rename(&dev->dev, dev->name);
8b41d188 8415 WARN_ON(err);
ce286d32
EB
8416
8417 /* Add the device back in the hashes */
8418 list_netdevice(dev);
8419
8420 /* Notify protocols that a new device appeared. */
8421 call_netdevice_notifiers(NETDEV_REGISTER, dev);
8422
d90a909e
EB
8423 /*
8424 * Prevent userspace races by waiting until the network
8425 * device is fully set up before sending notifications.
8426 */
7f294054 8427 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 8428
ce286d32
EB
8429 synchronize_net();
8430 err = 0;
8431out:
8432 return err;
8433}
463d0183 8434EXPORT_SYMBOL_GPL(dev_change_net_namespace);
ce286d32 8435
f0bf90de 8436static int dev_cpu_dead(unsigned int oldcpu)
1da177e4
LT
8437{
8438 struct sk_buff **list_skb;
1da177e4 8439 struct sk_buff *skb;
f0bf90de 8440 unsigned int cpu;
97d8b6e3 8441 struct softnet_data *sd, *oldsd, *remsd = NULL;
1da177e4 8442
1da177e4
LT
8443 local_irq_disable();
8444 cpu = smp_processor_id();
8445 sd = &per_cpu(softnet_data, cpu);
8446 oldsd = &per_cpu(softnet_data, oldcpu);
8447
8448 /* Find end of our completion_queue. */
8449 list_skb = &sd->completion_queue;
8450 while (*list_skb)
8451 list_skb = &(*list_skb)->next;
8452 /* Append completion queue from offline CPU. */
8453 *list_skb = oldsd->completion_queue;
8454 oldsd->completion_queue = NULL;
8455
1da177e4 8456 /* Append output queue from offline CPU. */
a9cbd588
CG
8457 if (oldsd->output_queue) {
8458 *sd->output_queue_tailp = oldsd->output_queue;
8459 sd->output_queue_tailp = oldsd->output_queue_tailp;
8460 oldsd->output_queue = NULL;
8461 oldsd->output_queue_tailp = &oldsd->output_queue;
8462 }
ac64da0b
ED
8463 /* Append NAPI poll list from offline CPU, with one exception :
8464 * process_backlog() must be called by cpu owning percpu backlog.
8465 * We properly handle process_queue & input_pkt_queue later.
8466 */
8467 while (!list_empty(&oldsd->poll_list)) {
8468 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
8469 struct napi_struct,
8470 poll_list);
8471
8472 list_del_init(&napi->poll_list);
8473 if (napi->poll == process_backlog)
8474 napi->state = 0;
8475 else
8476 ____napi_schedule(sd, napi);
264524d5 8477 }
1da177e4
LT
8478
8479 raise_softirq_irqoff(NET_TX_SOFTIRQ);
8480 local_irq_enable();
8481
773fc8f6 8482#ifdef CONFIG_RPS
8483 remsd = oldsd->rps_ipi_list;
8484 oldsd->rps_ipi_list = NULL;
8485#endif
8486 /* send out pending IPI's on offline CPU */
8487 net_rps_send_ipi(remsd);
8488
1da177e4 8489 /* Process offline CPU's input_pkt_queue */
76cc8b13 8490 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
91e83133 8491 netif_rx_ni(skb);
76cc8b13 8492 input_queue_head_incr(oldsd);
fec5e652 8493 }
ac64da0b 8494 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
91e83133 8495 netif_rx_ni(skb);
76cc8b13
TH
8496 input_queue_head_incr(oldsd);
8497 }
1da177e4 8498
f0bf90de 8499 return 0;
1da177e4 8500}
1da177e4 8501
7f353bf2 8502/**
b63365a2
HX
8503 * netdev_increment_features - increment feature set by one
8504 * @all: current feature set
8505 * @one: new feature set
8506 * @mask: mask feature set
7f353bf2
HX
8507 *
8508 * Computes a new feature set after adding a device with feature set
b63365a2
HX
8509 * @one to the master device with current feature set @all. Will not
8510 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 8511 */
c8f44aff
MM
8512netdev_features_t netdev_increment_features(netdev_features_t all,
8513 netdev_features_t one, netdev_features_t mask)
b63365a2 8514{
c8cd0989 8515 if (mask & NETIF_F_HW_CSUM)
a188222b 8516 mask |= NETIF_F_CSUM_MASK;
1742f183 8517 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 8518
a188222b 8519 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
1742f183 8520 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 8521
1742f183 8522 /* If one device supports hw checksumming, set for all. */
c8cd0989
TH
8523 if (all & NETIF_F_HW_CSUM)
8524 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
7f353bf2
HX
8525
8526 return all;
8527}
b63365a2 8528EXPORT_SYMBOL(netdev_increment_features);
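
/* Illustrative sketch (not part of dev.c): a master device folding each
 * lower device's features into its own, roughly what bonding does when a
 * slave is added; using hw_features as the mask is an assumption.
 */
static netdev_features_t example_master_features(struct net_device *master)
{
	netdev_features_t mask = master->hw_features;
	netdev_features_t features = mask;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(master, lower, iter)
		features = netdev_increment_features(features,
						     lower->features, mask);
	return features;
}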
7f353bf2 8529
430f03cd 8530static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
8531{
8532 int i;
8533 struct hlist_head *hash;
8534
8535 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
8536 if (hash != NULL)
8537 for (i = 0; i < NETDEV_HASHENTRIES; i++)
8538 INIT_HLIST_HEAD(&hash[i]);
8539
8540 return hash;
8541}
8542
881d966b 8543/* Initialize per network namespace state */
4665079c 8544static int __net_init netdev_init(struct net *net)
881d966b 8545{
734b6541
RM
8546 if (net != &init_net)
8547 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 8548
30d97d35
PE
8549 net->dev_name_head = netdev_create_hash();
8550 if (net->dev_name_head == NULL)
8551 goto err_name;
881d966b 8552
30d97d35
PE
8553 net->dev_index_head = netdev_create_hash();
8554 if (net->dev_index_head == NULL)
8555 goto err_idx;
881d966b
EB
8556
8557 return 0;
30d97d35
PE
8558
8559err_idx:
8560 kfree(net->dev_name_head);
8561err_name:
8562 return -ENOMEM;
881d966b
EB
8563}
8564
f0db275a
SH
8565/**
8566 * netdev_drivername - network driver for the device
8567 * @dev: network device
f0db275a
SH
8568 *
8569 * Determine network driver for device.
8570 */
3019de12 8571const char *netdev_drivername(const struct net_device *dev)
6579e57b 8572{
cf04a4c7
SH
8573 const struct device_driver *driver;
8574 const struct device *parent;
3019de12 8575 const char *empty = "";
6579e57b
AV
8576
8577 parent = dev->dev.parent;
6579e57b 8578 if (!parent)
3019de12 8579 return empty;
6579e57b
AV
8580
8581 driver = parent->driver;
8582 if (driver && driver->name)
3019de12
DM
8583 return driver->name;
8584 return empty;
6579e57b
AV
8585}
8586
6ea754eb
JP
8587static void __netdev_printk(const char *level, const struct net_device *dev,
8588 struct va_format *vaf)
256df2f3 8589{
b004ff49 8590 if (dev && dev->dev.parent) {
6ea754eb
JP
8591 dev_printk_emit(level[1] - '0',
8592 dev->dev.parent,
8593 "%s %s %s%s: %pV",
8594 dev_driver_string(dev->dev.parent),
8595 dev_name(dev->dev.parent),
8596 netdev_name(dev), netdev_reg_state(dev),
8597 vaf);
b004ff49 8598 } else if (dev) {
6ea754eb
JP
8599 printk("%s%s%s: %pV",
8600 level, netdev_name(dev), netdev_reg_state(dev), vaf);
b004ff49 8601 } else {
6ea754eb 8602 printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 8603 }
256df2f3
JP
8604}
8605
6ea754eb
JP
8606void netdev_printk(const char *level, const struct net_device *dev,
8607 const char *format, ...)
256df2f3
JP
8608{
8609 struct va_format vaf;
8610 va_list args;
256df2f3
JP
8611
8612 va_start(args, format);
8613
8614 vaf.fmt = format;
8615 vaf.va = &args;
8616
6ea754eb 8617 __netdev_printk(level, dev, &vaf);
b004ff49 8618
256df2f3 8619 va_end(args);
256df2f3
JP
8620}
8621EXPORT_SYMBOL(netdev_printk);
8622
8623#define define_netdev_printk_level(func, level) \
6ea754eb 8624void func(const struct net_device *dev, const char *fmt, ...) \
256df2f3 8625{ \
256df2f3
JP
8626 struct va_format vaf; \
8627 va_list args; \
8628 \
8629 va_start(args, fmt); \
8630 \
8631 vaf.fmt = fmt; \
8632 vaf.va = &args; \
8633 \
6ea754eb 8634 __netdev_printk(level, dev, &vaf); \
b004ff49 8635 \
256df2f3 8636 va_end(args); \
256df2f3
JP
8637} \
8638EXPORT_SYMBOL(func);
8639
8640define_netdev_printk_level(netdev_emerg, KERN_EMERG);
8641define_netdev_printk_level(netdev_alert, KERN_ALERT);
8642define_netdev_printk_level(netdev_crit, KERN_CRIT);
8643define_netdev_printk_level(netdev_err, KERN_ERR);
8644define_netdev_printk_level(netdev_warn, KERN_WARNING);
8645define_netdev_printk_level(netdev_notice, KERN_NOTICE);
8646define_netdev_printk_level(netdev_info, KERN_INFO);
8647
4665079c 8648static void __net_exit netdev_exit(struct net *net)
881d966b
EB
8649{
8650 kfree(net->dev_name_head);
8651 kfree(net->dev_index_head);
ee21b18b
VA
8652 if (net != &init_net)
8653 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
881d966b
EB
8654}
8655
022cbae6 8656static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
8657 .init = netdev_init,
8658 .exit = netdev_exit,
8659};
8660
4665079c 8661static void __net_exit default_device_exit(struct net *net)
ce286d32 8662{
e008b5fc 8663 struct net_device *dev, *aux;
ce286d32 8664 /*
e008b5fc 8665 * Push all migratable network devices back to the
ce286d32
EB
8666 * initial network namespace
8667 */
8668 rtnl_lock();
e008b5fc 8669 for_each_netdev_safe(net, dev, aux) {
ce286d32 8670 int err;
aca51397 8671 char fb_name[IFNAMSIZ];
ce286d32
EB
8672
8673 /* Ignore unmoveable devices (i.e. loopback) */
8674 if (dev->features & NETIF_F_NETNS_LOCAL)
8675 continue;
8676
e008b5fc
EB
8677 /* Leave virtual devices for the generic cleanup */
8678 if (dev->rtnl_link_ops)
8679 continue;
d0c082ce 8680
25985edc 8681 /* Push remaining network devices to init_net */
aca51397
PE
8682 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
8683 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 8684 if (err) {
7b6cd1ce
JP
8685 pr_emerg("%s: failed to move %s to init_net: %d\n",
8686 __func__, dev->name, err);
aca51397 8687 BUG();
ce286d32
EB
8688 }
8689 }
8690 rtnl_unlock();
8691}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
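
/*
 * Sketch (not part of dev.c): rtnl_lock_unregistering() is an instance of
 * the generic wait_woken() pattern, which records wakeups in the wait
 * entry itself and so avoids the classic check-then-sleep race. In the
 * abstract, with a hypothetical condition() predicate and wait queue 'wq':
 */
#if 0
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&wq, &wait);
	while (!condition())
		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	remove_wait_queue(&wq, &wait);
#endif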

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
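
/*
 * Sketch (not part of dev.c): a ->dellink() implementation cooperates with
 * the batching above by queueing onto the caller-supplied list instead of
 * unregistering synchronously; unregister_netdevice_queue() has the same
 * signature, so a driver with no extra teardown could in principle use it
 * directly. foo_dellink() is a hypothetical callback.
 */
#if 0
static void foo_dellink(struct net_device *dev, struct list_head *head)
{
	/* tear down driver-private state here, then queue the unregister */
	unregister_netdevice_queue(dev, head);
}
#endif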

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single-threaded during boot, so there is no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices, so that it is the first device that appears
	 * and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
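
/*
 * Note (illustrative, not part of dev.c): subsys_initcall() runs before
 * the device_initcall()/module_init() level, so the packet-type lists,
 * per-cpu softnet queues and softirqs set up above exist before any
 * built-in NIC driver can call register_netdev(). A hypothetical driver:
 */
#if 0
static int __init foo_driver_init(void)
{
	/* runs after net_dev_init(); safe to register net devices here */
	return 0;
}
module_init(foo_driver_init);	/* device_initcall level for built-in code */
#endif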