/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
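
/*
 * Usage sketch (illustrative, not part of the original file): a module-level
 * tap that sees every received packet, in the style of af_packet.c. The
 * identifiers my_tap and my_tap_rcv are hypothetical.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// consume our reference to the skb
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// or a specific EtherType
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);		// e.g. in module init
 *	dev_remove_pack(&my_tap);	// module exit; sleeps in synchronize_net()
 */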

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
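
/*
 * Usage sketch (illustrative, not part of the original file): GRO/GSO
 * handlers for a protocol are registered this way; compare with what
 * net/ipv4/af_inet.c does for IPv4. The my_* identifiers are hypothetical.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 *
 * Since the insertion loop above keeps offload_base sorted by ascending
 * .priority, a handler that must run before another can ask for that
 * ordering with a lower priority value.
 */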

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
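
/*
 * The "netdev=" boot parameter is parsed by get_options() above: up to four
 * leading integers (irq, base_addr, mem_start, mem_end) followed by the
 * interface name. For example (hypothetical values):
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * records irq 9, I/O base 0x300 and a 16K shared-memory window for "eth0",
 * to be picked up later by netdev_boot_setup_check() during probing.
 */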

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
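
/*
 * Usage sketch (illustrative, not part of the original file): the
 * refcounted variant pairs with dev_put(), e.g. from process context:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev; the held reference keeps it alive ...
 *		dev_put(dev);
 *	}
 *
 * Under rcu_read_lock(), dev_get_by_name_rcu() avoids the refcount
 * round-trip, but the pointer is only valid inside the RCU section.
 */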

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
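
/*
 * Usage sketch (illustrative, not part of the original file): an RCU-side
 * lookup by MAC address; the device pointer is only safe while the
 * read-side critical section is held (or after taking a reference):
 *
 *	static bool mac_is_local(struct net *net, const char *mac)
 *	{
 *		struct net_device *dev;
 *		bool found;
 *
 *		rcu_read_lock();
 *		dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *		found = dev != NULL;
 *		rcu_read_unlock();
 *		return found;
 *	}
 */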

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
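
/*
 * For example, dev_valid_name() accepts "eth0" or "wlan-guest" but rejects
 * "", ".", "..", names of IFNAMSIZ bytes or more, and anything containing
 * '/', ':' or whitespace, since the name becomes a sysfs directory entry.
 */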

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
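
/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * does not care about the unit number typically does, under RTNL:
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;
 *	// dev->name is now e.g. "dummy0"; err holds the assigned unit number
 */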

int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
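
/*
 * Usage sketch (illustrative, not part of the original file): both
 * dev_open() and dev_close() must run under the RTNL lock, so an
 * administrative flap of an interface looks like:
 *
 *	rtnl_lock();
 *	dev_close(dev);
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *
 * dev_close() on an already-down device is a no-op, mirroring dev_open()
 * on an already-up one.
 */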


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to give it a race-free view of the network
 *	device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
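
/*
 * Usage sketch (illustrative, not part of the original file): a subsystem
 * that tracks interface state registers a notifier_block; the hypothetical
 * callback below keys off the replayed NETDEV_REGISTER/NETDEV_UP events
 * documented above.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			pr_info("%s going down\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */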

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return call_netdevice_notifiers_info(val, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_key_enable(&netstamp_needed);
	else
		static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
1784{
b90e5794 1785#ifdef HAVE_JUMP_LABEL
13baa00a
ED
1786 int wanted;
1787
1788 while (1) {
1789 wanted = atomic_read(&netstamp_wanted);
1790 if (wanted <= 1)
1791 break;
1792 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1793 return;
1794 }
1795 atomic_dec(&netstamp_needed_deferred);
5fa8bbda
ED
1796 schedule_work(&netstamp_work);
1797#else
c5905afb 1798 static_key_slow_dec(&netstamp_needed);
5fa8bbda 1799#endif
1da177e4 1800}
d1b19dff 1801EXPORT_SYMBOL(net_disable_timestamp);
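/* Editorial note, not part of dev.c: the pair above is reference counted,
 * so a user brackets the period during which it needs RX timestamps. A
 * minimal sketch with hypothetical names:
 */
#if 0
static void example_start_capture(void)
{
	net_enable_timestamp();		/* first enabler flips the static key */
}

static void example_stop_capture(void)
{
	net_disable_timestamp();	/* last disabler turns stamping back off */
}
#endif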
1da177e4 1802
3b098e2d 1803static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 1804{
2456e855 1805 skb->tstamp = 0;
c5905afb 1806 if (static_key_false(&netstamp_needed))
a61bbcf2 1807 __net_timestamp(skb);
1da177e4
LT
1808}
1809
588f0330 1810#define net_timestamp_check(COND, SKB) \
c5905afb 1811 if (static_key_false(&netstamp_needed)) { \
2456e855 1812 if ((COND) && !(SKB)->tstamp) \
588f0330
ED
1813 __net_timestamp(SKB); \
1814 } \
3b098e2d 1815
f4b05d27 1816bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
79b569f0
DL
1817{
1818 unsigned int len;
1819
1820 if (!(dev->flags & IFF_UP))
1821 return false;
1822
1823 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1824 if (skb->len <= len)
1825 return true;
1826
1827 /* if TSO is enabled, we don't care about the length as the packet
1828 * could be forwarded without being segmented first
1829 */
1830 if (skb_is_gso(skb))
1831 return true;
1832
1833 return false;
1834}
1ee481fb 1835EXPORT_SYMBOL_GPL(is_skb_forwardable);
79b569f0 1836
a0265d28
HX
1837int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1838{
4e3264d2 1839 int ret = ____dev_forward_skb(dev, skb);
a0265d28 1840
4e3264d2
MKL
1841 if (likely(!ret)) {
1842 skb->protocol = eth_type_trans(skb, dev);
1843 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1844 }
a0265d28 1845
4e3264d2 1846 return ret;
a0265d28
HX
1847}
1848EXPORT_SYMBOL_GPL(__dev_forward_skb);
1849
44540960
AB
1850/**
1851 * dev_forward_skb - loopback an skb to another netif
1852 *
1853 * @dev: destination network device
1854 * @skb: buffer to forward
1855 *
1856 * return values:
1857 * NET_RX_SUCCESS (no congestion)
6ec82562 1858 * NET_RX_DROP (packet was dropped, but freed)
44540960
AB
1859 *
1860 * dev_forward_skb can be used for injecting an skb from the
1861 * start_xmit function of one device into the receive queue
1862 * of another device.
1863 *
1864 * The receiving device may be in another namespace, so
1865 * we have to clear all information in the skb that could
1866 * impact namespace isolation.
1867 */
1868int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1869{
a0265d28 1870 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
44540960
AB
1871}
1872EXPORT_SYMBOL_GPL(dev_forward_skb);
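/* Editorial example, not part of dev.c: a hypothetical paired device (in
 * the style of veth) looping frames from its start_xmit into the peer's
 * receive path; example_get_peer() is assumed.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);

	/* On NET_RX_DROP the skb has already been freed for us. */
	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
#endif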
1873
71d9dec2
CG
1874static inline int deliver_skb(struct sk_buff *skb,
1875 struct packet_type *pt_prev,
1876 struct net_device *orig_dev)
1877{
1f8b977a 1878 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1080e512 1879 return -ENOMEM;
63354797 1880 refcount_inc(&skb->users);
71d9dec2
CG
1881 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1882}
1883
7866a621
SN
1884static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1885 struct packet_type **pt,
fbcb2170
JP
1886 struct net_device *orig_dev,
1887 __be16 type,
7866a621
SN
1888 struct list_head *ptype_list)
1889{
1890 struct packet_type *ptype, *pt_prev = *pt;
1891
1892 list_for_each_entry_rcu(ptype, ptype_list, list) {
1893 if (ptype->type != type)
1894 continue;
1895 if (pt_prev)
fbcb2170 1896 deliver_skb(skb, pt_prev, orig_dev);
7866a621
SN
1897 pt_prev = ptype;
1898 }
1899 *pt = pt_prev;
1900}
1901
c0de08d0
EL
1902static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1903{
a3d744e9 1904 if (!ptype->af_packet_priv || !skb->sk)
c0de08d0
EL
1905 return false;
1906
1907 if (ptype->id_match)
1908 return ptype->id_match(ptype, skb->sk);
1909 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1910 return true;
1911
1912 return false;
1913}
1914
1da177e4
LT
1915/*
1916 * Support routine. Sends outgoing frames to any network
1917 * taps currently in use.
1918 */
1919
74b20582 1920void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
1921{
1922 struct packet_type *ptype;
71d9dec2
CG
1923 struct sk_buff *skb2 = NULL;
1924 struct packet_type *pt_prev = NULL;
7866a621 1925 struct list_head *ptype_list = &ptype_all;
a61bbcf2 1926
1da177e4 1927 rcu_read_lock();
7866a621
SN
1928again:
1929 list_for_each_entry_rcu(ptype, ptype_list, list) {
1da177e4
LT
1930 /* Never send packets back to the socket
1931 * they originated from - MvS (miquels@drinkel.ow.org)
1932 */
7866a621
SN
1933 if (skb_loop_sk(ptype, skb))
1934 continue;
71d9dec2 1935
7866a621
SN
1936 if (pt_prev) {
1937 deliver_skb(skb2, pt_prev, skb->dev);
1938 pt_prev = ptype;
1939 continue;
1940 }
1da177e4 1941
7866a621
SN
1942 /* need to clone skb, done only once */
1943 skb2 = skb_clone(skb, GFP_ATOMIC);
1944 if (!skb2)
1945 goto out_unlock;
70978182 1946
7866a621 1947 net_timestamp_set(skb2);
1da177e4 1948
7866a621
SN
1949 /* skb->nh should be correctly
1950 * set by sender, so that the second statement is
1951 * just protection against buggy protocols.
1952 */
1953 skb_reset_mac_header(skb2);
1954
1955 if (skb_network_header(skb2) < skb2->data ||
1956 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1957 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1958 ntohs(skb2->protocol),
1959 dev->name);
1960 skb_reset_network_header(skb2);
1da177e4 1961 }
7866a621
SN
1962
1963 skb2->transport_header = skb2->network_header;
1964 skb2->pkt_type = PACKET_OUTGOING;
1965 pt_prev = ptype;
1966 }
1967
1968 if (ptype_list == &ptype_all) {
1969 ptype_list = &dev->ptype_all;
1970 goto again;
1da177e4 1971 }
7866a621 1972out_unlock:
581fe0ea
WB
1973 if (pt_prev) {
1974 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
1975 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1976 else
1977 kfree_skb(skb2);
1978 }
1da177e4
LT
1979 rcu_read_unlock();
1980}
74b20582 1981EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
1da177e4 1982
2c53040f
BH
1983/**
1984 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
4f57c087
JF
1985 * @dev: Network device
1986 * @txq: number of queues available
1987 *
1988 * If real_num_tx_queues is changed the tc mappings may no longer be
1989 * valid. To resolve this verify the tc mapping remains valid and if
1990 * not, NULL the mapping. With no priorities mapping to this
1991 * offset/count pair it will no longer be used. In the worst case, when TC0
1992 * is invalid, nothing can be done, so disable priority mappings. It is
1993 * expected that drivers will fix this mapping if they can before
1994 * calling netif_set_real_num_tx_queues.
1995 */
bb134d22 1996static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
1997{
1998 int i;
1999 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2000
2001 /* If TC0 is invalidated disable TC mapping */
2002 if (tc->offset + tc->count > txq) {
7b6cd1ce 2003 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
2004 dev->num_tc = 0;
2005 return;
2006 }
2007
2008 /* Invalidated prio to tc mappings set to TC0 */
2009 for (i = 1; i < TC_BITMASK + 1; i++) {
2010 int q = netdev_get_prio_tc_map(dev, i);
2011
2012 tc = &dev->tc_to_txq[q];
2013 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
2014 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2015 i, q);
4f57c087
JF
2016 netdev_set_prio_tc_map(dev, i, 0);
2017 }
2018 }
2019}
2020
8d059b0f
AD
2021int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2022{
2023 if (dev->num_tc) {
2024 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2025 int i;
2026
2027 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2028 if ((txq - tc->offset) < tc->count)
2029 return i;
2030 }
2031
2032 return -1;
2033 }
2034
2035 return 0;
2036}
8a5f2166 2037EXPORT_SYMBOL(netdev_txq_to_tc);
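/* Worked example (editorial): with dev->num_tc == 2 and tc_to_txq ==
 * { {.offset = 0, .count = 4}, {.offset = 4, .count = 4} }, txq 5 fails
 * (5 - 0) < 4 for class 0 but satisfies (5 - 4) < 4 for class 1, so
 * netdev_txq_to_tc() returns 1; with num_tc == 0 every queue maps to the
 * default class 0.
 */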
8d059b0f 2038
537c00de
AD
2039#ifdef CONFIG_XPS
2040static DEFINE_MUTEX(xps_map_mutex);
2041#define xmap_dereference(P) \
2042 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2043
6234f874
AD
2044static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2045 int tci, u16 index)
537c00de 2046{
10cdc3f3
AD
2047 struct xps_map *map = NULL;
2048 int pos;
537c00de 2049
10cdc3f3 2050 if (dev_maps)
6234f874
AD
2051 map = xmap_dereference(dev_maps->cpu_map[tci]);
2052 if (!map)
2053 return false;
537c00de 2054
6234f874
AD
2055 for (pos = map->len; pos--;) {
2056 if (map->queues[pos] != index)
2057 continue;
2058
2059 if (map->len > 1) {
2060 map->queues[pos] = map->queues[--map->len];
10cdc3f3 2061 break;
537c00de 2062 }
6234f874
AD
2063
2064 RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
2065 kfree_rcu(map, rcu);
2066 return false;
537c00de
AD
2067 }
2068
6234f874 2069 return true;
10cdc3f3
AD
2070}
2071
6234f874
AD
2072static bool remove_xps_queue_cpu(struct net_device *dev,
2073 struct xps_dev_maps *dev_maps,
2074 int cpu, u16 offset, u16 count)
2075{
184c449f
AD
2076 int num_tc = dev->num_tc ? : 1;
2077 bool active = false;
2078 int tci;
6234f874 2079
184c449f
AD
2080 for (tci = cpu * num_tc; num_tc--; tci++) {
2081 int i, j;
2082
2083 for (i = count, j = offset; i--; j++) {
2084 if (!remove_xps_queue(dev_maps, cpu, j))
2085 break;
2086 }
2087
2088 active |= i < 0;
6234f874
AD
2089 }
2090
184c449f 2091 return active;
6234f874
AD
2092}
2093
2094static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2095 u16 count)
10cdc3f3
AD
2096{
2097 struct xps_dev_maps *dev_maps;
024e9679 2098 int cpu, i;
10cdc3f3
AD
2099 bool active = false;
2100
2101 mutex_lock(&xps_map_mutex);
2102 dev_maps = xmap_dereference(dev->xps_maps);
2103
2104 if (!dev_maps)
2105 goto out_no_maps;
2106
6234f874
AD
2107 for_each_possible_cpu(cpu)
2108 active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
2109 offset, count);
10cdc3f3
AD
2110
2111 if (!active) {
537c00de
AD
2112 RCU_INIT_POINTER(dev->xps_maps, NULL);
2113 kfree_rcu(dev_maps, rcu);
2114 }
2115
6234f874 2116 for (i = offset + (count - 1); count--; i--)
024e9679
AD
2117 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2118 NUMA_NO_NODE);
2119
537c00de
AD
2120out_no_maps:
2121 mutex_unlock(&xps_map_mutex);
2122}
2123
6234f874
AD
2124static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2125{
2126 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2127}
2128
01c5f864
AD
2129static struct xps_map *expand_xps_map(struct xps_map *map,
2130 int cpu, u16 index)
2131{
2132 struct xps_map *new_map;
2133 int alloc_len = XPS_MIN_MAP_ALLOC;
2134 int i, pos;
2135
2136 for (pos = 0; map && pos < map->len; pos++) {
2137 if (map->queues[pos] != index)
2138 continue;
2139 return map;
2140 }
2141
2142 /* Need to add queue to this CPU's existing map */
2143 if (map) {
2144 if (pos < map->alloc_len)
2145 return map;
2146
2147 alloc_len = map->alloc_len * 2;
2148 }
2149
2150 /* Need to allocate new map to store queue on this CPU's map */
2151 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2152 cpu_to_node(cpu));
2153 if (!new_map)
2154 return NULL;
2155
2156 for (i = 0; i < pos; i++)
2157 new_map->queues[i] = map->queues[i];
2158 new_map->alloc_len = alloc_len;
2159 new_map->len = pos;
2160
2161 return new_map;
2162}
2163
3573540c
MT
2164int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2165 u16 index)
537c00de 2166{
01c5f864 2167 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
184c449f
AD
2168 int i, cpu, tci, numa_node_id = -2;
2169 int maps_sz, num_tc = 1, tc = 0;
537c00de 2170 struct xps_map *map, *new_map;
01c5f864 2171 bool active = false;
537c00de 2172
184c449f
AD
2173 if (dev->num_tc) {
2174 num_tc = dev->num_tc;
2175 tc = netdev_txq_to_tc(dev, index);
2176 if (tc < 0)
2177 return -EINVAL;
2178 }
2179
2180 maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
2181 if (maps_sz < L1_CACHE_BYTES)
2182 maps_sz = L1_CACHE_BYTES;
2183
537c00de
AD
2184 mutex_lock(&xps_map_mutex);
2185
2186 dev_maps = xmap_dereference(dev->xps_maps);
2187
01c5f864 2188 /* allocate memory for queue storage */
184c449f 2189 for_each_cpu_and(cpu, cpu_online_mask, mask) {
01c5f864
AD
2190 if (!new_dev_maps)
2191 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2bb60cb9
AD
2192 if (!new_dev_maps) {
2193 mutex_unlock(&xps_map_mutex);
01c5f864 2194 return -ENOMEM;
2bb60cb9 2195 }
01c5f864 2196
184c449f
AD
2197 tci = cpu * num_tc + tc;
2198 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
01c5f864
AD
2199 NULL;
2200
2201 map = expand_xps_map(map, cpu, index);
2202 if (!map)
2203 goto error;
2204
184c449f 2205 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
01c5f864
AD
2206 }
2207
2208 if (!new_dev_maps)
2209 goto out_no_new_maps;
2210
537c00de 2211 for_each_possible_cpu(cpu) {
184c449f
AD
2212 /* copy maps belonging to foreign traffic classes */
2213 for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
2214 /* fill in the new device map from the old device map */
2215 map = xmap_dereference(dev_maps->cpu_map[tci]);
2216 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2217 }
2218
2219 /* We need to explicitly update tci as the previous loop
2220 * could break out early if dev_maps is NULL.
2221 */
2222 tci = cpu * num_tc + tc;
2223
01c5f864
AD
2224 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2225 /* add queue to CPU maps */
2226 int pos = 0;
2227
184c449f 2228 map = xmap_dereference(new_dev_maps->cpu_map[tci]);
01c5f864
AD
2229 while ((pos < map->len) && (map->queues[pos] != index))
2230 pos++;
2231
2232 if (pos == map->len)
2233 map->queues[map->len++] = index;
537c00de 2234#ifdef CONFIG_NUMA
537c00de
AD
2235 if (numa_node_id == -2)
2236 numa_node_id = cpu_to_node(cpu);
2237 else if (numa_node_id != cpu_to_node(cpu))
2238 numa_node_id = -1;
537c00de 2239#endif
01c5f864
AD
2240 } else if (dev_maps) {
2241 /* fill in the new device map from the old device map */
184c449f
AD
2242 map = xmap_dereference(dev_maps->cpu_map[tci]);
2243 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
537c00de 2244 }
01c5f864 2245
184c449f
AD
2246 /* copy maps belonging to foreign traffic classes */
2247 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2248 /* fill in the new device map from the old device map */
2249 map = xmap_dereference(dev_maps->cpu_map[tci]);
2250 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2251 }
537c00de
AD
2252 }
2253
01c5f864
AD
2254 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2255
537c00de 2256 /* Cleanup old maps */
184c449f
AD
2257 if (!dev_maps)
2258 goto out_no_old_maps;
2259
2260 for_each_possible_cpu(cpu) {
2261 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2262 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2263 map = xmap_dereference(dev_maps->cpu_map[tci]);
01c5f864
AD
2264 if (map && map != new_map)
2265 kfree_rcu(map, rcu);
2266 }
537c00de
AD
2267 }
2268
184c449f
AD
2269 kfree_rcu(dev_maps, rcu);
2270
2271out_no_old_maps:
01c5f864
AD
2272 dev_maps = new_dev_maps;
2273 active = true;
537c00de 2274
01c5f864
AD
2275out_no_new_maps:
2276 /* update Tx queue numa node */
537c00de
AD
2277 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2278 (numa_node_id >= 0) ? numa_node_id :
2279 NUMA_NO_NODE);
2280
01c5f864
AD
2281 if (!dev_maps)
2282 goto out_no_maps;
2283
2284 /* removes queue from unused CPUs */
2285 for_each_possible_cpu(cpu) {
184c449f
AD
2286 for (i = tc, tci = cpu * num_tc; i--; tci++)
2287 active |= remove_xps_queue(dev_maps, tci, index);
2288 if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
2289 active |= remove_xps_queue(dev_maps, tci, index);
2290 for (i = num_tc - tc, tci++; --i; tci++)
2291 active |= remove_xps_queue(dev_maps, tci, index);
01c5f864
AD
2292 }
2293
2294 /* free map if not active */
2295 if (!active) {
2296 RCU_INIT_POINTER(dev->xps_maps, NULL);
2297 kfree_rcu(dev_maps, rcu);
2298 }
2299
2300out_no_maps:
537c00de
AD
2301 mutex_unlock(&xps_map_mutex);
2302
2303 return 0;
2304error:
01c5f864
AD
2305 /* remove any maps that we added */
2306 for_each_possible_cpu(cpu) {
184c449f
AD
2307 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2308 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2309 map = dev_maps ?
2310 xmap_dereference(dev_maps->cpu_map[tci]) :
2311 NULL;
2312 if (new_map && new_map != map)
2313 kfree(new_map);
2314 }
01c5f864
AD
2315 }
2316
537c00de
AD
2317 mutex_unlock(&xps_map_mutex);
2318
537c00de
AD
2319 kfree(new_dev_maps);
2320 return -ENOMEM;
2321}
2322EXPORT_SYMBOL(netif_set_xps_queue);
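/* Editorial example, not part of dev.c: a hypothetical driver pinning each
 * TX queue to a single CPU through the API above; error handling elided.
 */
#if 0
static void example_setup_xps(struct net_device *dev)
{
	cpumask_t mask;
	int i;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		cpumask_clear(&mask);
		cpumask_set_cpu(i % num_online_cpus(), &mask);
		netif_set_xps_queue(dev, &mask, i);
	}
}
#endif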
2323
2324#endif
9cf1f6a8
AD
2325void netdev_reset_tc(struct net_device *dev)
2326{
6234f874
AD
2327#ifdef CONFIG_XPS
2328 netif_reset_xps_queues_gt(dev, 0);
2329#endif
9cf1f6a8
AD
2330 dev->num_tc = 0;
2331 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2332 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2333}
2334EXPORT_SYMBOL(netdev_reset_tc);
2335
2336int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2337{
2338 if (tc >= dev->num_tc)
2339 return -EINVAL;
2340
6234f874
AD
2341#ifdef CONFIG_XPS
2342 netif_reset_xps_queues(dev, offset, count);
2343#endif
9cf1f6a8
AD
2344 dev->tc_to_txq[tc].count = count;
2345 dev->tc_to_txq[tc].offset = offset;
2346 return 0;
2347}
2348EXPORT_SYMBOL(netdev_set_tc_queue);
2349
2350int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2351{
2352 if (num_tc > TC_MAX_QUEUE)
2353 return -EINVAL;
2354
6234f874
AD
2355#ifdef CONFIG_XPS
2356 netif_reset_xps_queues_gt(dev, 0);
2357#endif
9cf1f6a8
AD
2358 dev->num_tc = num_tc;
2359 return 0;
2360}
2361EXPORT_SYMBOL(netdev_set_num_tc);
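/* Editorial example, not part of dev.c: an mqprio-style configuration for
 * a hypothetical device with 8 TX queues split into two traffic classes.
 */
#if 0
static int example_setup_tc(struct net_device *dev)
{
	int err = netdev_set_num_tc(dev, 2);

	if (err)
		return err;
	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0..3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4..7 */
	netdev_set_prio_tc_map(dev, 0, 0);	/* priority 0 -> TC0 */
	netdev_set_prio_tc_map(dev, 1, 1);	/* priority 1 -> TC1 */
	return 0;
}
#endif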
2362
f0796d5c
JF
2363/*
2364 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2365 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2366 */
e6484930 2367int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2368{
1d24eb48
TH
2369 int rc;
2370
e6484930
TH
2371 if (txq < 1 || txq > dev->num_tx_queues)
2372 return -EINVAL;
f0796d5c 2373
5c56580b
BH
2374 if (dev->reg_state == NETREG_REGISTERED ||
2375 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
2376 ASSERT_RTNL();
2377
1d24eb48
TH
2378 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2379 txq);
bf264145
TH
2380 if (rc)
2381 return rc;
2382
4f57c087
JF
2383 if (dev->num_tc)
2384 netif_setup_tc(dev, txq);
2385
024e9679 2386 if (txq < dev->real_num_tx_queues) {
e6484930 2387 qdisc_reset_all_tx_gt(dev, txq);
024e9679
AD
2388#ifdef CONFIG_XPS
2389 netif_reset_xps_queues_gt(dev, txq);
2390#endif
2391 }
f0796d5c 2392 }
e6484930
TH
2393
2394 dev->real_num_tx_queues = txq;
2395 return 0;
f0796d5c
JF
2396}
2397EXPORT_SYMBOL(netif_set_real_num_tx_queues);
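/* Editorial example, not part of dev.c: a hypothetical ethtool-style
 * channel-count change; RTNL must already be held for a registered device.
 */
#if 0
static int example_set_channels(struct net_device *dev, unsigned int n)
{
	int err;

	ASSERT_RTNL();
	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, n);
}
#endif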
56079431 2398
a953be53 2399#ifdef CONFIG_SYSFS
62fe0b40
BH
2400/**
2401 * netif_set_real_num_rx_queues - set actual number of RX queues used
2402 * @dev: Network device
2403 * @rxq: Actual number of RX queues
2404 *
2405 * This must be called either with the rtnl_lock held or before
2406 * registration of the net device. Returns 0 on success, or a
4e7f7951
BH
2407 * negative error code. If called before registration, it always
2408 * succeeds.
62fe0b40
BH
2409 */
2410int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2411{
2412 int rc;
2413
bd25fa7b
TH
2414 if (rxq < 1 || rxq > dev->num_rx_queues)
2415 return -EINVAL;
2416
62fe0b40
BH
2417 if (dev->reg_state == NETREG_REGISTERED) {
2418 ASSERT_RTNL();
2419
62fe0b40
BH
2420 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2421 rxq);
2422 if (rc)
2423 return rc;
62fe0b40
BH
2424 }
2425
2426 dev->real_num_rx_queues = rxq;
2427 return 0;
2428}
2429EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2430#endif
2431
2c53040f
BH
2432/**
2433 * netif_get_num_default_rss_queues - default number of RSS queues
16917b87
YM
2434 *
2435 * This routine should set an upper limit on the number of RSS queues
2436 * used by default by multiqueue devices.
2437 */
a55b138b 2438int netif_get_num_default_rss_queues(void)
16917b87 2439{
40e4e713
HS
2440 return is_kdump_kernel() ?
2441 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
16917b87
YM
2442}
2443EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2444
3bcb846c 2445static void __netif_reschedule(struct Qdisc *q)
56079431 2446{
def82a1d
JP
2447 struct softnet_data *sd;
2448 unsigned long flags;
56079431 2449
def82a1d 2450 local_irq_save(flags);
903ceff7 2451 sd = this_cpu_ptr(&softnet_data);
a9cbd588
CG
2452 q->next_sched = NULL;
2453 *sd->output_queue_tailp = q;
2454 sd->output_queue_tailp = &q->next_sched;
def82a1d
JP
2455 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2456 local_irq_restore(flags);
2457}
2458
2459void __netif_schedule(struct Qdisc *q)
2460{
2461 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2462 __netif_reschedule(q);
56079431
DV
2463}
2464EXPORT_SYMBOL(__netif_schedule);
2465
e6247027
ED
2466struct dev_kfree_skb_cb {
2467 enum skb_free_reason reason;
2468};
2469
2470static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
56079431 2471{
e6247027
ED
2472 return (struct dev_kfree_skb_cb *)skb->cb;
2473}
2474
46e5da40
JF
2475void netif_schedule_queue(struct netdev_queue *txq)
2476{
2477 rcu_read_lock();
2478 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2479 struct Qdisc *q = rcu_dereference(txq->qdisc);
2480
2481 __netif_schedule(q);
2482 }
2483 rcu_read_unlock();
2484}
2485EXPORT_SYMBOL(netif_schedule_queue);
2486
46e5da40
JF
2487void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2488{
2489 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2490 struct Qdisc *q;
2491
2492 rcu_read_lock();
2493 q = rcu_dereference(dev_queue->qdisc);
2494 __netif_schedule(q);
2495 rcu_read_unlock();
2496 }
2497}
2498EXPORT_SYMBOL(netif_tx_wake_queue);
2499
e6247027 2500void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 2501{
e6247027 2502 unsigned long flags;
56079431 2503
9899886d
MJ
2504 if (unlikely(!skb))
2505 return;
2506
63354797 2507 if (likely(refcount_read(&skb->users) == 1)) {
e6247027 2508 smp_rmb();
63354797
RE
2509 refcount_set(&skb->users, 0);
2510 } else if (likely(!refcount_dec_and_test(&skb->users))) {
e6247027 2511 return;
bea3348e 2512 }
e6247027
ED
2513 get_kfree_skb_cb(skb)->reason = reason;
2514 local_irq_save(flags);
2515 skb->next = __this_cpu_read(softnet_data.completion_queue);
2516 __this_cpu_write(softnet_data.completion_queue, skb);
2517 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2518 local_irq_restore(flags);
56079431 2519}
e6247027 2520EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 2521
e6247027 2522void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
56079431
DV
2523{
2524 if (in_irq() || irqs_disabled())
e6247027 2525 __dev_kfree_skb_irq(skb, reason);
56079431
DV
2526 else
2527 dev_kfree_skb(skb);
2528}
e6247027 2529EXPORT_SYMBOL(__dev_kfree_skb_any);
56079431
DV
2530
2531
bea3348e
SH
2532/**
2533 * netif_device_detach - mark device as removed
2534 * @dev: network device
2535 *
2536 * Mark device as removed from system and therefore no longer available.
2537 */
56079431
DV
2538void netif_device_detach(struct net_device *dev)
2539{
2540 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2541 netif_running(dev)) {
d543103a 2542 netif_tx_stop_all_queues(dev);
56079431
DV
2543 }
2544}
2545EXPORT_SYMBOL(netif_device_detach);
2546
bea3348e
SH
2547/**
2548 * netif_device_attach - mark device as attached
2549 * @dev: network device
2550 *
2551 * Mark device as attached from system and restart if needed.
2552 */
56079431
DV
2553void netif_device_attach(struct net_device *dev)
2554{
2555 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2556 netif_running(dev)) {
d543103a 2557 netif_tx_wake_all_queues(dev);
4ec93edb 2558 __netdev_watchdog_up(dev);
56079431
DV
2559 }
2560}
2561EXPORT_SYMBOL(netif_device_attach);
2562
5605c762
JP
2563/*
2564 * Returns a Tx hash based on the given packet descriptor and the number
2565 * of Tx queues to be used as a distribution range.
2566 */
2567u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2568 unsigned int num_tx_queues)
2569{
2570 u32 hash;
2571 u16 qoffset = 0;
2572 u16 qcount = num_tx_queues;
2573
2574 if (skb_rx_queue_recorded(skb)) {
2575 hash = skb_get_rx_queue(skb);
2576 while (unlikely(hash >= num_tx_queues))
2577 hash -= num_tx_queues;
2578 return hash;
2579 }
2580
2581 if (dev->num_tc) {
2582 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
f4563a75 2583
5605c762
JP
2584 qoffset = dev->tc_to_txq[tc].offset;
2585 qcount = dev->tc_to_txq[tc].count;
2586 }
2587
2588 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2589}
2590EXPORT_SYMBOL(__skb_tx_hash);
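/* Editorial note: reciprocal_scale(hash, qcount) computes
 * (u32)(((u64)hash * qcount) >> 32), mapping the 32-bit flow hash
 * uniformly into [0, qcount) without a division; qoffset then shifts the
 * result into the traffic class's slice of the queue range.
 */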
2591
36c92474
BH
2592static void skb_warn_bad_offload(const struct sk_buff *skb)
2593{
84d15ae5 2594 static const netdev_features_t null_features;
36c92474 2595 struct net_device *dev = skb->dev;
88ad4175 2596 const char *name = "";
36c92474 2597
c846ad9b
BG
2598 if (!net_ratelimit())
2599 return;
2600
88ad4175
BM
2601 if (dev) {
2602 if (dev->dev.parent)
2603 name = dev_driver_string(dev->dev.parent);
2604 else
2605 name = netdev_name(dev);
2606 }
36c92474
BH
2607 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2608 "gso_type=%d ip_summed=%d\n",
88ad4175 2609 name, dev ? &dev->features : &null_features,
65e9d2fa 2610 skb->sk ? &skb->sk->sk_route_caps : &null_features,
36c92474
BH
2611 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2612 skb_shinfo(skb)->gso_type, skb->ip_summed);
2613}
2614
1da177e4
LT
2615/*
2616 * Invalidate hardware checksum when packet is to be mangled, and
2617 * complete checksum manually on outgoing path.
2618 */
84fa7933 2619int skb_checksum_help(struct sk_buff *skb)
1da177e4 2620{
d3bc23e7 2621 __wsum csum;
663ead3b 2622 int ret = 0, offset;
1da177e4 2623
84fa7933 2624 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
2625 goto out_set_summed;
2626
2627 if (unlikely(skb_shinfo(skb)->gso_size)) {
36c92474
BH
2628 skb_warn_bad_offload(skb);
2629 return -EINVAL;
1da177e4
LT
2630 }
2631
cef401de
ED
2632 /* Before computing a checksum, we should make sure no frag could
2633 * be modified by an external entity : checksum could be wrong.
2634 */
2635 if (skb_has_shared_frag(skb)) {
2636 ret = __skb_linearize(skb);
2637 if (ret)
2638 goto out;
2639 }
2640
55508d60 2641 offset = skb_checksum_start_offset(skb);
a030847e
HX
2642 BUG_ON(offset >= skb_headlen(skb));
2643 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2644
2645 offset += skb->csum_offset;
2646 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2647
2648 if (skb_cloned(skb) &&
2649 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
2650 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2651 if (ret)
2652 goto out;
2653 }
2654
4f2e4ad5 2655 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
a430a43d 2656out_set_summed:
1da177e4 2657 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 2658out:
1da177e4
LT
2659 return ret;
2660}
d1b19dff 2661EXPORT_SYMBOL(skb_checksum_help);
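/* Editorial example, not part of dev.c: a hypothetical driver without
 * checksum offload for a given protocol resolves CHECKSUM_PARTIAL in
 * software before handing the frame to hardware.
 */
#if 0
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/* ... DMA-map and queue the now fully checksummed skb ... */
	return NETDEV_TX_OK;
}
#endif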
1da177e4 2662
b72b5bf6
DC
2663int skb_crc32c_csum_help(struct sk_buff *skb)
2664{
2665 __le32 crc32c_csum;
2666 int ret = 0, offset, start;
2667
2668 if (skb->ip_summed != CHECKSUM_PARTIAL)
2669 goto out;
2670
2671 if (unlikely(skb_is_gso(skb)))
2672 goto out;
2673
2674 /* Before computing a checksum, we should make sure no frag could
2675 * be modified by an external entity : checksum could be wrong.
2676 */
2677 if (unlikely(skb_has_shared_frag(skb))) {
2678 ret = __skb_linearize(skb);
2679 if (ret)
2680 goto out;
2681 }
2682 start = skb_checksum_start_offset(skb);
2683 offset = start + offsetof(struct sctphdr, checksum);
2684 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
2685 ret = -EINVAL;
2686 goto out;
2687 }
2688 if (skb_cloned(skb) &&
2689 !skb_clone_writable(skb, offset + sizeof(__le32))) {
2690 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2691 if (ret)
2692 goto out;
2693 }
2694 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
2695 skb->len - start, ~(__u32)0,
2696 crc32c_csum_stub));
2697 *(__le32 *)(skb->data + offset) = crc32c_csum;
2698 skb->ip_summed = CHECKSUM_NONE;
dba00306 2699 skb->csum_not_inet = 0;
b72b5bf6
DC
2700out:
2701 return ret;
2702}
2703
53d6471c 2704__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
f6a78bfc 2705{
252e3346 2706 __be16 type = skb->protocol;
f6a78bfc 2707
19acc327
PS
2708 /* Tunnel gso handlers can set protocol to ethernet. */
2709 if (type == htons(ETH_P_TEB)) {
2710 struct ethhdr *eth;
2711
2712 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2713 return 0;
2714
2715 eth = (struct ethhdr *)skb_mac_header(skb);
2716 type = eth->h_proto;
2717 }
2718
d4bcef3f 2719 return __vlan_get_protocol(skb, type, depth);
ec5f0615
PS
2720}
2721
2722/**
2723 * skb_mac_gso_segment - mac layer segmentation handler.
2724 * @skb: buffer to segment
2725 * @features: features for the output path (see dev->features)
2726 */
2727struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2728 netdev_features_t features)
2729{
2730 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2731 struct packet_offload *ptype;
53d6471c
VY
2732 int vlan_depth = skb->mac_len;
2733 __be16 type = skb_network_protocol(skb, &vlan_depth);
ec5f0615
PS
2734
2735 if (unlikely(!type))
2736 return ERR_PTR(-EINVAL);
2737
53d6471c 2738 __skb_pull(skb, vlan_depth);
f6a78bfc
HX
2739
2740 rcu_read_lock();
22061d80 2741 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 2742 if (ptype->type == type && ptype->callbacks.gso_segment) {
f191a1d1 2743 segs = ptype->callbacks.gso_segment(skb, features);
f6a78bfc
HX
2744 break;
2745 }
2746 }
2747 rcu_read_unlock();
2748
98e399f8 2749 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 2750
f6a78bfc
HX
2751 return segs;
2752}
05e8ef4a
PS
2753EXPORT_SYMBOL(skb_mac_gso_segment);
2754
2755
2756/* openvswitch calls this on rx path, so we need a different check.
2757 */
2758static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2759{
2760 if (tx_path)
0c19f846
WB
2761 return skb->ip_summed != CHECKSUM_PARTIAL &&
2762 skb->ip_summed != CHECKSUM_UNNECESSARY;
6e7bc478
ED
2763
2764 return skb->ip_summed == CHECKSUM_NONE;
05e8ef4a
PS
2765}
2766
2767/**
2768 * __skb_gso_segment - Perform segmentation on skb.
2769 * @skb: buffer to segment
2770 * @features: features for the output path (see dev->features)
2771 * @tx_path: whether it is called in TX path
2772 *
2773 * This function segments the given skb and returns a list of segments.
2774 *
2775 * It may return NULL if the skb requires no segmentation. This is
2776 * only possible when GSO is used for verifying header integrity.
9207f9d4
KK
2777 *
2778 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
05e8ef4a
PS
2779 */
2780struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2781 netdev_features_t features, bool tx_path)
2782{
b2504a5d
ED
2783 struct sk_buff *segs;
2784
05e8ef4a
PS
2785 if (unlikely(skb_needs_check(skb, tx_path))) {
2786 int err;
2787
b2504a5d 2788 /* We're going to init ->check field in TCP or UDP header */
a40e0a66 2789 err = skb_cow_head(skb, 0);
2790 if (err < 0)
05e8ef4a
PS
2791 return ERR_PTR(err);
2792 }
2793
802ab55a
AD
2794 /* Only report GSO partial support if it will enable us to
2795 * support segmentation on this frame without needing additional
2796 * work.
2797 */
2798 if (features & NETIF_F_GSO_PARTIAL) {
2799 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
2800 struct net_device *dev = skb->dev;
2801
2802 partial_features |= dev->features & dev->gso_partial_features;
2803 if (!skb_gso_ok(skb, features | partial_features))
2804 features &= ~NETIF_F_GSO_PARTIAL;
2805 }
2806
9207f9d4
KK
2807 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2808 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2809
68c33163 2810 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3347c960
ED
2811 SKB_GSO_CB(skb)->encap_level = 0;
2812
05e8ef4a
PS
2813 skb_reset_mac_header(skb);
2814 skb_reset_mac_len(skb);
2815
b2504a5d
ED
2816 segs = skb_mac_gso_segment(skb, features);
2817
2818 if (unlikely(skb_needs_check(skb, tx_path)))
2819 skb_warn_bad_offload(skb);
2820
2821 return segs;
05e8ef4a 2822}
12b0004d 2823EXPORT_SYMBOL(__skb_gso_segment);
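/* Editorial example, not part of dev.c: consuming the list produced by
 * skb_gso_segment(); validate_xmit_skb() below performs the same dance.
 * example_send_one() is hypothetical.
 */
#if 0
static int example_segment_and_send(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = skb_gso_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* no segmentation was required */
		return example_send_one(skb);

	consume_skb(skb);		/* the original is replaced by the list */
	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;
		example_send_one(segs);
		segs = next;
	}
	return 0;
}
#endif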
f6a78bfc 2824
fb286bb2
HX
2825/* Take action when hardware reception checksum errors are detected. */
2826#ifdef CONFIG_BUG
2827void netdev_rx_csum_fault(struct net_device *dev)
2828{
2829 if (net_ratelimit()) {
7b6cd1ce 2830 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
fb286bb2
HX
2831 dump_stack();
2832 }
2833}
2834EXPORT_SYMBOL(netdev_rx_csum_fault);
2835#endif
2836
1da177e4
LT
2837 /* Actually, we should eliminate this check as soon as we know that:
2838 * 1. An IOMMU is present and allows mapping all the memory.
2839 * 2. No high memory really exists on this machine.
2840 */
2841
c1e756bf 2842static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 2843{
3d3a8533 2844#ifdef CONFIG_HIGHMEM
1da177e4 2845 int i;
f4563a75 2846
5acbbd42 2847 if (!(dev->features & NETIF_F_HIGHDMA)) {
ea2ab693
IC
2848 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2849 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
f4563a75 2850
ea2ab693 2851 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 2852 return 1;
ea2ab693 2853 }
5acbbd42 2854 }
1da177e4 2855
5acbbd42
FT
2856 if (PCI_DMA_BUS_IS_PHYS) {
2857 struct device *pdev = dev->dev.parent;
1da177e4 2858
9092c658
ED
2859 if (!pdev)
2860 return 0;
5acbbd42 2861 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
ea2ab693
IC
2862 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2863 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
f4563a75 2864
5acbbd42
FT
2865 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2866 return 1;
2867 }
2868 }
3d3a8533 2869#endif
1da177e4
LT
2870 return 0;
2871}
1da177e4 2872
3b392ddb
SH
2873/* If MPLS offload request, verify we are testing hardware MPLS features
2874 * instead of standard features for the netdev.
2875 */
d0edc7bf 2876#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3b392ddb
SH
2877static netdev_features_t net_mpls_features(struct sk_buff *skb,
2878 netdev_features_t features,
2879 __be16 type)
2880{
25cd9ba0 2881 if (eth_p_mpls(type))
3b392ddb
SH
2882 features &= skb->dev->mpls_features;
2883
2884 return features;
2885}
2886#else
2887static netdev_features_t net_mpls_features(struct sk_buff *skb,
2888 netdev_features_t features,
2889 __be16 type)
2890{
2891 return features;
2892}
2893#endif
2894
c8f44aff 2895static netdev_features_t harmonize_features(struct sk_buff *skb,
c1e756bf 2896 netdev_features_t features)
f01a5236 2897{
53d6471c 2898 int tmp;
3b392ddb
SH
2899 __be16 type;
2900
2901 type = skb_network_protocol(skb, &tmp);
2902 features = net_mpls_features(skb, features, type);
53d6471c 2903
c0d680e5 2904 if (skb->ip_summed != CHECKSUM_NONE &&
3b392ddb 2905 !can_checksum_protocol(features, type)) {
996e8021 2906 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
f01a5236 2907 }
7be2c82c
ED
2908 if (illegal_highdma(skb->dev, skb))
2909 features &= ~NETIF_F_SG;
f01a5236
JG
2910
2911 return features;
2912}
2913
e38f3025
TM
2914netdev_features_t passthru_features_check(struct sk_buff *skb,
2915 struct net_device *dev,
2916 netdev_features_t features)
2917{
2918 return features;
2919}
2920EXPORT_SYMBOL(passthru_features_check);
2921
8cb65d00
TM
2922static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2923 struct net_device *dev,
2924 netdev_features_t features)
2925{
2926 return vlan_features_check(skb, features);
2927}
2928
cbc53e08
AD
2929static netdev_features_t gso_features_check(const struct sk_buff *skb,
2930 struct net_device *dev,
2931 netdev_features_t features)
2932{
2933 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2934
2935 if (gso_segs > dev->gso_max_segs)
2936 return features & ~NETIF_F_GSO_MASK;
2937
802ab55a
AD
2938 /* Support for GSO partial features requires software
2939 * intervention before we can actually process the packets
2940 * so we need to strip support for any partial features now
2941 * and we can pull them back in after we have partially
2942 * segmented the frame.
2943 */
2944 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
2945 features &= ~dev->gso_partial_features;
2946
2947 /* Make sure to clear the IPv4 ID mangling feature if the
2948 * IPv4 header has the potential to be fragmented.
cbc53e08
AD
2949 */
2950 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
2951 struct iphdr *iph = skb->encapsulation ?
2952 inner_ip_hdr(skb) : ip_hdr(skb);
2953
2954 if (!(iph->frag_off & htons(IP_DF)))
2955 features &= ~NETIF_F_TSO_MANGLEID;
2956 }
2957
2958 return features;
2959}
2960
c1e756bf 2961netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6 2962{
5f35227e 2963 struct net_device *dev = skb->dev;
fcbeb976 2964 netdev_features_t features = dev->features;
58e998c6 2965
cbc53e08
AD
2966 if (skb_is_gso(skb))
2967 features = gso_features_check(skb, dev, features);
30b678d8 2968
5f35227e
JG
2969 /* If encapsulation offload request, verify we are testing
2970 * hardware encapsulation features instead of standard
2971 * features for the netdev
2972 */
2973 if (skb->encapsulation)
2974 features &= dev->hw_enc_features;
2975
f5a7fb88
TM
2976 if (skb_vlan_tagged(skb))
2977 features = netdev_intersect_features(features,
2978 dev->vlan_features |
2979 NETIF_F_HW_VLAN_CTAG_TX |
2980 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 2981
5f35227e
JG
2982 if (dev->netdev_ops->ndo_features_check)
2983 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2984 features);
8cb65d00
TM
2985 else
2986 features &= dflt_features_check(skb, dev, features);
5f35227e 2987
c1e756bf 2988 return harmonize_features(skb, features);
58e998c6 2989}
c1e756bf 2990EXPORT_SYMBOL(netif_skb_features);
58e998c6 2991
2ea25513 2992static int xmit_one(struct sk_buff *skb, struct net_device *dev,
95f6b3dd 2993 struct netdev_queue *txq, bool more)
f6a78bfc 2994{
2ea25513
DM
2995 unsigned int len;
2996 int rc;
00829823 2997
7866a621 2998 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2ea25513 2999 dev_queue_xmit_nit(skb, dev);
fc741216 3000
2ea25513
DM
3001 len = skb->len;
3002 trace_net_dev_start_xmit(skb, dev);
95f6b3dd 3003 rc = netdev_start_xmit(skb, dev, txq, more);
2ea25513 3004 trace_net_dev_xmit(skb, rc, dev, len);
adf30907 3005
2ea25513
DM
3006 return rc;
3007}
7b9c6090 3008
8dcda22a
DM
3009struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3010 struct netdev_queue *txq, int *ret)
7f2e870f
DM
3011{
3012 struct sk_buff *skb = first;
3013 int rc = NETDEV_TX_OK;
7b9c6090 3014
7f2e870f
DM
3015 while (skb) {
3016 struct sk_buff *next = skb->next;
fc70fb64 3017
7f2e870f 3018 skb->next = NULL;
95f6b3dd 3019 rc = xmit_one(skb, dev, txq, next != NULL);
7f2e870f
DM
3020 if (unlikely(!dev_xmit_complete(rc))) {
3021 skb->next = next;
3022 goto out;
3023 }
6afff0ca 3024
7f2e870f
DM
3025 skb = next;
3026 if (netif_xmit_stopped(txq) && skb) {
3027 rc = NETDEV_TX_BUSY;
3028 break;
9ccb8975 3029 }
7f2e870f 3030 }
9ccb8975 3031
7f2e870f
DM
3032out:
3033 *ret = rc;
3034 return skb;
3035}
b40863c6 3036
1ff0dc94
ED
3037static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3038 netdev_features_t features)
f6a78bfc 3039{
df8a39de 3040 if (skb_vlan_tag_present(skb) &&
5968250c
JP
3041 !vlan_hw_offload_capable(features, skb->vlan_proto))
3042 skb = __vlan_hwaccel_push_inside(skb);
eae3f88e
DM
3043 return skb;
3044}
f6a78bfc 3045
43c26a1a
DC
3046int skb_csum_hwoffload_help(struct sk_buff *skb,
3047 const netdev_features_t features)
3048{
3049 if (unlikely(skb->csum_not_inet))
3050 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3051 skb_crc32c_csum_help(skb);
3052
3053 return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3054}
3055EXPORT_SYMBOL(skb_csum_hwoffload_help);
3056
55a93b3e 3057static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
eae3f88e
DM
3058{
3059 netdev_features_t features;
f6a78bfc 3060
eae3f88e
DM
3061 features = netif_skb_features(skb);
3062 skb = validate_xmit_vlan(skb, features);
3063 if (unlikely(!skb))
3064 goto out_null;
7b9c6090 3065
8b86a61d 3066 if (netif_needs_gso(skb, features)) {
ce93718f
DM
3067 struct sk_buff *segs;
3068
3069 segs = skb_gso_segment(skb, features);
cecda693 3070 if (IS_ERR(segs)) {
af6dabc9 3071 goto out_kfree_skb;
cecda693
JW
3072 } else if (segs) {
3073 consume_skb(skb);
3074 skb = segs;
f6a78bfc 3075 }
eae3f88e
DM
3076 } else {
3077 if (skb_needs_linearize(skb, features) &&
3078 __skb_linearize(skb))
3079 goto out_kfree_skb;
4ec93edb 3080
f6e27114
SK
3081 if (validate_xmit_xfrm(skb, features))
3082 goto out_kfree_skb;
3083
eae3f88e
DM
3084 /* If packet is not checksummed and device does not
3085 * support checksumming for this protocol, complete
3086 * checksumming here.
3087 */
3088 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3089 if (skb->encapsulation)
3090 skb_set_inner_transport_header(skb,
3091 skb_checksum_start_offset(skb));
3092 else
3093 skb_set_transport_header(skb,
3094 skb_checksum_start_offset(skb));
43c26a1a 3095 if (skb_csum_hwoffload_help(skb, features))
eae3f88e 3096 goto out_kfree_skb;
7b9c6090 3097 }
0c772159 3098 }
7b9c6090 3099
eae3f88e 3100 return skb;
fc70fb64 3101
f6a78bfc
HX
3102out_kfree_skb:
3103 kfree_skb(skb);
eae3f88e 3104out_null:
d21fd63e 3105 atomic_long_inc(&dev->tx_dropped);
eae3f88e
DM
3106 return NULL;
3107}
6afff0ca 3108
55a93b3e
ED
3109struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
3110{
3111 struct sk_buff *next, *head = NULL, *tail;
3112
bec3cfdc 3113 for (; skb != NULL; skb = next) {
55a93b3e
ED
3114 next = skb->next;
3115 skb->next = NULL;
bec3cfdc
ED
3116
3117 /* in case skb won't be segmented, point it to itself */
3118 skb->prev = skb;
3119
55a93b3e 3120 skb = validate_xmit_skb(skb, dev);
bec3cfdc
ED
3121 if (!skb)
3122 continue;
55a93b3e 3123
bec3cfdc
ED
3124 if (!head)
3125 head = skb;
3126 else
3127 tail->next = skb;
3128 /* If skb was segmented, skb->prev points to
3129 * the last segment. If not, it still contains skb.
3130 */
3131 tail = skb->prev;
55a93b3e
ED
3132 }
3133 return head;
f6a78bfc 3134}
104ba78c 3135EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
f6a78bfc 3136
1def9238
ED
3137static void qdisc_pkt_len_init(struct sk_buff *skb)
3138{
3139 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3140
3141 qdisc_skb_cb(skb)->pkt_len = skb->len;
3142
3143 /* To get more precise estimation of bytes sent on wire,
3144 * we add to pkt_len the headers size of all segments
3145 */
3146 if (shinfo->gso_size) {
757b8b1d 3147 unsigned int hdr_len;
15e5a030 3148 u16 gso_segs = shinfo->gso_segs;
1def9238 3149
757b8b1d
ED
3150 /* mac layer + network layer */
3151 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3152
3153 /* + transport layer */
1def9238
ED
3154 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3155 hdr_len += tcp_hdrlen(skb);
3156 else
3157 hdr_len += sizeof(struct udphdr);
15e5a030
JW
3158
3159 if (shinfo->gso_type & SKB_GSO_DODGY)
3160 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3161 shinfo->gso_size);
3162
3163 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
1def9238
ED
3164 }
3165}
3166
bbd8a0d3
KK
3167static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3168 struct net_device *dev,
3169 struct netdev_queue *txq)
3170{
3171 spinlock_t *root_lock = qdisc_lock(q);
520ac30f 3172 struct sk_buff *to_free = NULL;
a2da570d 3173 bool contended;
bbd8a0d3
KK
3174 int rc;
3175
a2da570d 3176 qdisc_calculate_pkt_len(skb, q);
79640a4c
ED
3177 /*
3178 * Heuristic to force contended enqueues to serialize on a
3179 * separate lock before trying to get qdisc main lock.
f9eb8aea 3180 * This permits the qdisc->running owner to get the lock more
9bf2b8c2 3181 * often and dequeue packets faster.
79640a4c 3182 */
a2da570d 3183 contended = qdisc_is_running(q);
79640a4c
ED
3184 if (unlikely(contended))
3185 spin_lock(&q->busylock);
3186
bbd8a0d3
KK
3187 spin_lock(root_lock);
3188 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
520ac30f 3189 __qdisc_drop(skb, &to_free);
bbd8a0d3
KK
3190 rc = NET_XMIT_DROP;
3191 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 3192 qdisc_run_begin(q)) {
bbd8a0d3
KK
3193 /*
3194 * This is a work-conserving queue; there are no old skbs
3195 * waiting to be sent out; and the qdisc is not running -
3196 * xmit the skb directly.
3197 */
bfe0d029 3198
bfe0d029
ED
3199 qdisc_bstats_update(q, skb);
3200
55a93b3e 3201 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
79640a4c
ED
3202 if (unlikely(contended)) {
3203 spin_unlock(&q->busylock);
3204 contended = false;
3205 }
bbd8a0d3 3206 __qdisc_run(q);
79640a4c 3207 } else
bc135b23 3208 qdisc_run_end(q);
bbd8a0d3
KK
3209
3210 rc = NET_XMIT_SUCCESS;
3211 } else {
520ac30f 3212 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
79640a4c
ED
3213 if (qdisc_run_begin(q)) {
3214 if (unlikely(contended)) {
3215 spin_unlock(&q->busylock);
3216 contended = false;
3217 }
3218 __qdisc_run(q);
3219 }
bbd8a0d3
KK
3220 }
3221 spin_unlock(root_lock);
520ac30f
ED
3222 if (unlikely(to_free))
3223 kfree_skb_list(to_free);
79640a4c
ED
3224 if (unlikely(contended))
3225 spin_unlock(&q->busylock);
bbd8a0d3
KK
3226 return rc;
3227}
3228
86f8515f 3229#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
5bc1421e
NH
3230static void skb_update_prio(struct sk_buff *skb)
3231{
6977a79d 3232 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
5bc1421e 3233
91c68ce2 3234 if (!skb->priority && skb->sk && map) {
2a56a1fe
TH
3235 unsigned int prioidx =
3236 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
91c68ce2
ED
3237
3238 if (prioidx < map->priomap_len)
3239 skb->priority = map->priomap[prioidx];
3240 }
5bc1421e
NH
3241}
3242#else
3243#define skb_update_prio(skb)
3244#endif
3245
f60e5990 3246DEFINE_PER_CPU(int, xmit_recursion);
3247EXPORT_SYMBOL(xmit_recursion);
3248
95603e22
MM
3249/**
3250 * dev_loopback_xmit - loop back @skb
0c4b51f0
EB
3251 * @net: network namespace this loopback is happening in
3252 * @sk: sk needed to be a netfilter okfn
95603e22
MM
3253 * @skb: buffer to transmit
3254 */
0c4b51f0 3255int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
95603e22
MM
3256{
3257 skb_reset_mac_header(skb);
3258 __skb_pull(skb, skb_network_offset(skb));
3259 skb->pkt_type = PACKET_LOOPBACK;
3260 skb->ip_summed = CHECKSUM_UNNECESSARY;
3261 WARN_ON(!skb_dst(skb));
3262 skb_dst_force(skb);
3263 netif_rx_ni(skb);
3264 return 0;
3265}
3266EXPORT_SYMBOL(dev_loopback_xmit);
3267
1f211a1b
DB
3268#ifdef CONFIG_NET_EGRESS
3269static struct sk_buff *
3270sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3271{
46209401 3272 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
1f211a1b
DB
3273 struct tcf_result cl_res;
3274
46209401 3275 if (!miniq)
1f211a1b
DB
3276 return skb;
3277
8dc07fdb 3278 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
46209401 3279 mini_qdisc_bstats_cpu_update(miniq, skb);
1f211a1b 3280
46209401 3281 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
1f211a1b
DB
3282 case TC_ACT_OK:
3283 case TC_ACT_RECLASSIFY:
3284 skb->tc_index = TC_H_MIN(cl_res.classid);
3285 break;
3286 case TC_ACT_SHOT:
46209401 3287 mini_qdisc_qstats_cpu_drop(miniq);
1f211a1b 3288 *ret = NET_XMIT_DROP;
7e2c3aea
DB
3289 kfree_skb(skb);
3290 return NULL;
1f211a1b
DB
3291 case TC_ACT_STOLEN:
3292 case TC_ACT_QUEUED:
e25ea21f 3293 case TC_ACT_TRAP:
1f211a1b 3294 *ret = NET_XMIT_SUCCESS;
7e2c3aea 3295 consume_skb(skb);
1f211a1b
DB
3296 return NULL;
3297 case TC_ACT_REDIRECT:
3298 /* No need to push/pop skb's mac_header here on egress! */
3299 skb_do_redirect(skb);
3300 *ret = NET_XMIT_SUCCESS;
3301 return NULL;
3302 default:
3303 break;
3304 }
3305
3306 return skb;
3307}
3308#endif /* CONFIG_NET_EGRESS */
3309
638b2a69
JP
3310static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3311{
3312#ifdef CONFIG_XPS
3313 struct xps_dev_maps *dev_maps;
3314 struct xps_map *map;
3315 int queue_index = -1;
3316
3317 rcu_read_lock();
3318 dev_maps = rcu_dereference(dev->xps_maps);
3319 if (dev_maps) {
184c449f
AD
3320 unsigned int tci = skb->sender_cpu - 1;
3321
3322 if (dev->num_tc) {
3323 tci *= dev->num_tc;
3324 tci += netdev_get_prio_tc_map(dev, skb->priority);
3325 }
3326
3327 map = rcu_dereference(dev_maps->cpu_map[tci]);
638b2a69
JP
3328 if (map) {
3329 if (map->len == 1)
3330 queue_index = map->queues[0];
3331 else
3332 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3333 map->len)];
3334 if (unlikely(queue_index >= dev->real_num_tx_queues))
3335 queue_index = -1;
3336 }
3337 }
3338 rcu_read_unlock();
3339
3340 return queue_index;
3341#else
3342 return -1;
3343#endif
3344}
3345
3346static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3347{
3348 struct sock *sk = skb->sk;
3349 int queue_index = sk_tx_queue_get(sk);
3350
3351 if (queue_index < 0 || skb->ooo_okay ||
3352 queue_index >= dev->real_num_tx_queues) {
3353 int new_index = get_xps_queue(dev, skb);
f4563a75 3354
638b2a69
JP
3355 if (new_index < 0)
3356 new_index = skb_tx_hash(dev, skb);
3357
3358 if (queue_index != new_index && sk &&
004a5d01 3359 sk_fullsock(sk) &&
638b2a69
JP
3360 rcu_access_pointer(sk->sk_dst_cache))
3361 sk_tx_queue_set(sk, new_index);
3362
3363 queue_index = new_index;
3364 }
3365
3366 return queue_index;
3367}
3368
3369struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3370 struct sk_buff *skb,
3371 void *accel_priv)
3372{
3373 int queue_index = 0;
3374
3375#ifdef CONFIG_XPS
52bd2d62
ED
3376 u32 sender_cpu = skb->sender_cpu - 1;
3377
3378 if (sender_cpu >= (u32)NR_CPUS)
638b2a69
JP
3379 skb->sender_cpu = raw_smp_processor_id() + 1;
3380#endif
3381
3382 if (dev->real_num_tx_queues != 1) {
3383 const struct net_device_ops *ops = dev->netdev_ops;
f4563a75 3384
638b2a69
JP
3385 if (ops->ndo_select_queue)
3386 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3387 __netdev_pick_tx);
3388 else
3389 queue_index = __netdev_pick_tx(dev, skb);
3390
3391 if (!accel_priv)
3392 queue_index = netdev_cap_txqueue(dev, queue_index);
3393 }
3394
3395 skb_set_queue_mapping(skb, queue_index);
3396 return netdev_get_tx_queue(dev, queue_index);
3397}
3398
d29f749e 3399/**
9d08dd3d 3400 * __dev_queue_xmit - transmit a buffer
d29f749e 3401 * @skb: buffer to transmit
9d08dd3d 3402 * @accel_priv: private data used for L2 forwarding offload
d29f749e
DJ
3403 *
3404 * Queue a buffer for transmission to a network device. The caller must
3405 * have set the device and priority and built the buffer before calling
3406 * this function. The function can be called from an interrupt.
3407 *
3408 * A negative errno code is returned on a failure. A success does not
3409 * guarantee the frame will be transmitted as it may be dropped due
3410 * to congestion or traffic shaping.
3411 *
3412 * -----------------------------------------------------------------------------------
3413 * I notice this method can also return errors from the queue disciplines,
3414 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3415 * be positive.
3416 *
3417 * Regardless of the return value, the skb is consumed, so it is currently
3418 * difficult to retry a send to this method. (You can bump the ref count
3419 * before sending to hold a reference for retry if you are careful.)
3420 *
3421 * When calling this method, interrupts MUST be enabled. This is because
3422 * the BH enable code must have IRQs enabled so that it will not deadlock.
3423 * --BLG
3424 */
0a59f3a9 3425static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
1da177e4
LT
3426{
3427 struct net_device *dev = skb->dev;
dc2b4847 3428 struct netdev_queue *txq;
1da177e4
LT
3429 struct Qdisc *q;
3430 int rc = -ENOMEM;
3431
6d1ccff6
ED
3432 skb_reset_mac_header(skb);
3433
e7fd2885
WB
3434 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3435 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3436
4ec93edb
YH
3437 /* Disable soft irqs for various locks below. Also
3438 * stops preemption for RCU.
1da177e4 3439 */
4ec93edb 3440 rcu_read_lock_bh();
1da177e4 3441
5bc1421e
NH
3442 skb_update_prio(skb);
3443
1f211a1b
DB
3444 qdisc_pkt_len_init(skb);
3445#ifdef CONFIG_NET_CLS_ACT
8dc07fdb 3446 skb->tc_at_ingress = 0;
1f211a1b
DB
3447# ifdef CONFIG_NET_EGRESS
3448 if (static_key_false(&egress_needed)) {
3449 skb = sch_handle_egress(skb, &rc, dev);
3450 if (!skb)
3451 goto out;
3452 }
3453# endif
3454#endif
02875878
ED
3455 /* If device/qdisc don't need skb->dst, release it right now while
3456 * its hot in this cpu cache.
3457 */
3458 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3459 skb_dst_drop(skb);
3460 else
3461 skb_dst_force(skb);
3462
f663dd9a 3463 txq = netdev_pick_tx(dev, skb, accel_priv);
a898def2 3464 q = rcu_dereference_bh(txq->qdisc);
37437bb2 3465
cf66ba58 3466 trace_net_dev_queue(skb);
1da177e4 3467 if (q->enqueue) {
bbd8a0d3 3468 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 3469 goto out;
1da177e4
LT
3470 }
3471
3472 /* The device has no queue. Common case for software devices:
eb13da1a 3473 * loopback, all sorts of tunnels...
1da177e4 3474
eb13da1a 3475 * Really, it is unlikely that netif_tx_lock protection is necessary
3476 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
3477 * counters.)
3478 * However, it is possible, that they rely on protection
3479 * made by us here.
1da177e4 3480
eb13da1a 3481 * Check this and shot the lock. It is not prone from deadlocks.
3482 *Either shot noqueue qdisc, it is even simpler 8)
1da177e4
LT
3483 */
3484 if (dev->flags & IFF_UP) {
3485 int cpu = smp_processor_id(); /* ok because BHs are off */
3486
c773e847 3487 if (txq->xmit_lock_owner != cpu) {
a70b506e
DB
3488 if (unlikely(__this_cpu_read(xmit_recursion) >
3489 XMIT_RECURSION_LIMIT))
745e20f1
ED
3490 goto recursion_alert;
3491
1f59533f
JDB
3492 skb = validate_xmit_skb(skb, dev);
3493 if (!skb)
d21fd63e 3494 goto out;
1f59533f 3495
c773e847 3496 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 3497
73466498 3498 if (!netif_xmit_stopped(txq)) {
745e20f1 3499 __this_cpu_inc(xmit_recursion);
ce93718f 3500 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
745e20f1 3501 __this_cpu_dec(xmit_recursion);
572a9d7b 3502 if (dev_xmit_complete(rc)) {
c773e847 3503 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
3504 goto out;
3505 }
3506 }
c773e847 3507 HARD_TX_UNLOCK(dev, txq);
e87cc472
JP
3508 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3509 dev->name);
1da177e4
LT
3510 } else {
3511 /* Recursion is detected! It is possible,
745e20f1
ED
3512 * unfortunately
3513 */
3514recursion_alert:
e87cc472
JP
3515 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3516 dev->name);
1da177e4
LT
3517 }
3518 }
3519
3520 rc = -ENETDOWN;
d4828d85 3521 rcu_read_unlock_bh();
1da177e4 3522
015f0688 3523 atomic_long_inc(&dev->tx_dropped);
1f59533f 3524 kfree_skb_list(skb);
1da177e4
LT
3525 return rc;
3526out:
d4828d85 3527 rcu_read_unlock_bh();
1da177e4
LT
3528 return rc;
3529}
f663dd9a 3530
2b4aa3ce 3531int dev_queue_xmit(struct sk_buff *skb)
f663dd9a
JW
3532{
3533 return __dev_queue_xmit(skb, NULL);
3534}
2b4aa3ce 3535EXPORT_SYMBOL(dev_queue_xmit);
1da177e4 3536
f663dd9a
JW
3537int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3538{
3539 return __dev_queue_xmit(skb, accel_priv);
3540}
3541EXPORT_SYMBOL(dev_queue_xmit_accel);
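
/* Editor's sketch (not part of the original file): as the comment above
 * __dev_queue_xmit() notes, the skb is consumed even on failure, so a
 * caller that wants to retry must hold its own reference first. The
 * helper name is hypothetical; the calls are standard kernel APIs.
 */
static int example_xmit_holding_ref(struct sk_buff *skb)
{
	int rc;

	skb_get(skb);			/* extra ref survives the consume */
	rc = dev_queue_xmit(skb);
	if (rc == NET_XMIT_SUCCESS) {
		consume_skb(skb);	/* sent: drop our extra ref */
		return 0;
	}
	/* Drop or errno: our reference keeps the skb alive for a retry,
	 * but the caller must eventually kfree_skb() it. (NET_XMIT_CN may
	 * mean the packet was still enqueued, so retrying is not always
	 * safe; hence "if you are careful" above.)
	 */
	return rc;
}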
3542
1da177e4 3543
eb13da1a 3544/*************************************************************************
3545 * Receiver routines
3546 *************************************************************************/
1da177e4 3547
6b2bedc3 3548int netdev_max_backlog __read_mostly = 1000;
c9e6bc64
ED
3549EXPORT_SYMBOL(netdev_max_backlog);
3550
3b098e2d 3551int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3 3552int netdev_budget __read_mostly = 300;
7acf8a1e 3553unsigned int __read_mostly netdev_budget_usecs = 2000;
3d48b53f
MT
3554int weight_p __read_mostly = 64; /* old backlog weight */
3555int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
3556int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
3557int dev_rx_weight __read_mostly = 64;
3558int dev_tx_weight __read_mostly = 64;
1da177e4 3559
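/* Editor's note: the knobs above are wired up as sysctls in
 * net/core/sysctl_net_core.c, e.g.:
 *
 *	sysctl -w net.core.netdev_max_backlog=2000
 *	sysctl -w net.core.netdev_budget_usecs=8000
 *
 * net.core.dev_weight sets weight_p, and dev_rx_weight/dev_tx_weight
 * are derived from it via the rx/tx bias values.
 */
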
eecfd7c4
ED
3560/* Called with irq disabled */
3561static inline void ____napi_schedule(struct softnet_data *sd,
3562 struct napi_struct *napi)
3563{
3564 list_add_tail(&napi->poll_list, &sd->poll_list);
3565 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3566}
3567
bfb564e7
KK
3568#ifdef CONFIG_RPS
3569
3570/* One global table that all flow-based protocols share. */
6e3f7faf 3571struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 3572EXPORT_SYMBOL(rps_sock_flow_table);
567e4b79
ED
3573u32 rps_cpu_mask __read_mostly;
3574EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 3575
c5905afb 3576struct static_key rps_needed __read_mostly;
3df97ba8 3577EXPORT_SYMBOL(rps_needed);
13bfff25
ED
3578struct static_key rfs_needed __read_mostly;
3579EXPORT_SYMBOL(rfs_needed);
adc9300e 3580
c445477d
BH
3581static struct rps_dev_flow *
3582set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3583 struct rps_dev_flow *rflow, u16 next_cpu)
3584{
a31196b0 3585 if (next_cpu < nr_cpu_ids) {
c445477d
BH
3586#ifdef CONFIG_RFS_ACCEL
3587 struct netdev_rx_queue *rxqueue;
3588 struct rps_dev_flow_table *flow_table;
3589 struct rps_dev_flow *old_rflow;
3590 u32 flow_id;
3591 u16 rxq_index;
3592 int rc;
3593
3594 /* Should we steer this flow to a different hardware queue? */
69a19ee6
BH
3595 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3596 !(dev->features & NETIF_F_NTUPLE))
c445477d
BH
3597 goto out;
3598 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3599 if (rxq_index == skb_get_rx_queue(skb))
3600 goto out;
3601
3602 rxqueue = dev->_rx + rxq_index;
3603 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3604 if (!flow_table)
3605 goto out;
61b905da 3606 flow_id = skb_get_hash(skb) & flow_table->mask;
c445477d
BH
3607 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3608 rxq_index, flow_id);
3609 if (rc < 0)
3610 goto out;
3611 old_rflow = rflow;
3612 rflow = &flow_table->flows[flow_id];
c445477d
BH
3613 rflow->filter = rc;
3614 if (old_rflow->filter == rflow->filter)
3615 old_rflow->filter = RPS_NO_FILTER;
3616 out:
3617#endif
3618 rflow->last_qtail =
09994d1b 3619 per_cpu(softnet_data, next_cpu).input_queue_head;
c445477d
BH
3620 }
3621
09994d1b 3622 rflow->cpu = next_cpu;
c445477d
BH
3623 return rflow;
3624}
3625
bfb564e7
KK
3626/*
3627 * get_rps_cpu is called from netif_receive_skb and returns the target
3628 * CPU from the RPS map of the receiving queue for a given skb.
3629 * rcu_read_lock must be held on entry.
3630 */
3631static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3632 struct rps_dev_flow **rflowp)
3633{
567e4b79
ED
3634 const struct rps_sock_flow_table *sock_flow_table;
3635 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 3636 struct rps_dev_flow_table *flow_table;
567e4b79 3637 struct rps_map *map;
bfb564e7 3638 int cpu = -1;
567e4b79 3639 u32 tcpu;
61b905da 3640 u32 hash;
bfb564e7
KK
3641
3642 if (skb_rx_queue_recorded(skb)) {
3643 u16 index = skb_get_rx_queue(skb);
567e4b79 3644
62fe0b40
BH
3645 if (unlikely(index >= dev->real_num_rx_queues)) {
3646 WARN_ONCE(dev->real_num_rx_queues > 1,
3647 "%s received packet on queue %u, but number "
3648 "of RX queues is %u\n",
3649 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
3650 goto done;
3651 }
567e4b79
ED
3652 rxqueue += index;
3653 }
bfb564e7 3654
567e4b79
ED
3655 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3656
3657 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 3658 map = rcu_dereference(rxqueue->rps_map);
567e4b79 3659 if (!flow_table && !map)
bfb564e7
KK
3660 goto done;
3661
2d47b459 3662 skb_reset_network_header(skb);
61b905da
TH
3663 hash = skb_get_hash(skb);
3664 if (!hash)
bfb564e7
KK
3665 goto done;
3666
fec5e652
TH
3667 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3668 if (flow_table && sock_flow_table) {
fec5e652 3669 struct rps_dev_flow *rflow;
567e4b79
ED
3670 u32 next_cpu;
3671 u32 ident;
3672
3673 /* First check into global flow table if there is a match */
3674 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3675 if ((ident ^ hash) & ~rps_cpu_mask)
3676 goto try_rps;
fec5e652 3677
567e4b79
ED
3678 next_cpu = ident & rps_cpu_mask;
3679
3680 /* OK, now we know there is a match,
3681 * we can look at the local (per receive queue) flow table
3682 */
61b905da 3683 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
3684 tcpu = rflow->cpu;
3685
fec5e652
TH
3686 /*
3687 * If the desired CPU (where last recvmsg was done) is
3688 * different from current CPU (one in the rx-queue flow
3689 * table entry), switch if one of the following holds:
a31196b0 3690 * - Current CPU is unset (>= nr_cpu_ids).
fec5e652
TH
3691 * - Current CPU is offline.
3692 * - The current CPU's queue tail has advanced beyond the
3693 * last packet that was enqueued using this table entry.
3694 * This guarantees that all previous packets for the flow
3695 * have been dequeued, thus preserving in order delivery.
3696 */
3697 if (unlikely(tcpu != next_cpu) &&
a31196b0 3698 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 3699 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3700 rflow->last_qtail)) >= 0)) {
3701 tcpu = next_cpu;
c445477d 3702 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3703 }
c445477d 3704
a31196b0 3705 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
3706 *rflowp = rflow;
3707 cpu = tcpu;
3708 goto done;
3709 }
3710 }
3711
567e4b79
ED
3712try_rps:
3713
0a9627f2 3714 if (map) {
8fc54f68 3715 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
3716 if (cpu_online(tcpu)) {
3717 cpu = tcpu;
3718 goto done;
3719 }
3720 }
3721
3722done:
0a9627f2
TH
3723 return cpu;
3724}
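
/* Editor's sketch of the rps_sock_flow_table lookup performed above:
 * each entry packs the flow hash into its upper bits and the desired
 * CPU into the low rps_cpu_mask bits, so a single load both validates
 * the match and yields next_cpu. The helper is hypothetical; the math
 * mirrors get_rps_cpu().
 */
static inline bool example_sock_flow_lookup(const struct rps_sock_flow_table *t,
					    u32 hash, u32 *next_cpu)
{
	u32 ident = t->ents[hash & t->mask];

	if ((ident ^ hash) & ~rps_cpu_mask)	/* upper bits must match */
		return false;
	*next_cpu = ident & rps_cpu_mask;	/* low bits carry the CPU */
	return true;
}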
3725
c445477d
BH
3726#ifdef CONFIG_RFS_ACCEL
3727
3728/**
3729 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3730 * @dev: Device on which the filter was set
3731 * @rxq_index: RX queue index
3732 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3733 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3734 *
3735 * Drivers that implement ndo_rx_flow_steer() should periodically call
3736 * this function for each installed filter and remove the filters for
3737 * which it returns %true.
3738 */
3739bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3740 u32 flow_id, u16 filter_id)
3741{
3742 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3743 struct rps_dev_flow_table *flow_table;
3744 struct rps_dev_flow *rflow;
3745 bool expire = true;
a31196b0 3746 unsigned int cpu;
c445477d
BH
3747
3748 rcu_read_lock();
3749 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3750 if (flow_table && flow_id <= flow_table->mask) {
3751 rflow = &flow_table->flows[flow_id];
6aa7de05 3752 cpu = READ_ONCE(rflow->cpu);
a31196b0 3753 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
c445477d
BH
3754 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3755 rflow->last_qtail) <
3756 (int)(10 * flow_table->mask)))
3757 expire = false;
3758 }
3759 rcu_read_unlock();
3760 return expire;
3761}
3762EXPORT_SYMBOL(rps_may_expire_flow);
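
/* Editor's sketch of the periodic expiry scan the comment above asks
 * drivers to run; the example_* names are hypothetical bookkeeping,
 * while rps_may_expire_flow() is the real API being demonstrated.
 */
struct example_rfs_filter {
	bool installed;
	u32 flow_id;
	u16 filter_id;
};

static void example_expire_rfs_filters(struct net_device *dev, u16 rxq_index,
				       struct example_rfs_filter *f, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!f[i].installed)
			continue;
		if (rps_may_expire_flow(dev, rxq_index, f[i].flow_id,
					f[i].filter_id)) {
			/* remove the hardware filter via a device-specific
			 * command here, then mark the slot free
			 */
			f[i].installed = false;
		}
	}
}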
3763
3764#endif /* CONFIG_RFS_ACCEL */
3765
0a9627f2 3766/* Called from hardirq (IPI) context */
e36fa2f7 3767static void rps_trigger_softirq(void *data)
0a9627f2 3768{
e36fa2f7
ED
3769 struct softnet_data *sd = data;
3770
eecfd7c4 3771 ____napi_schedule(sd, &sd->backlog);
dee42870 3772 sd->received_rps++;
0a9627f2 3773}
e36fa2f7 3774
fec5e652 3775#endif /* CONFIG_RPS */
0a9627f2 3776
e36fa2f7
ED
3777/*
 3778 * Check if this softnet_data structure belongs to another CPU.
 3779 * If yes, queue it to our IPI list and return 1;
 3780 * if no, return 0.
3781 */
3782static int rps_ipi_queued(struct softnet_data *sd)
3783{
3784#ifdef CONFIG_RPS
903ceff7 3785 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
e36fa2f7
ED
3786
3787 if (sd != mysd) {
3788 sd->rps_ipi_next = mysd->rps_ipi_list;
3789 mysd->rps_ipi_list = sd;
3790
3791 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3792 return 1;
3793 }
3794#endif /* CONFIG_RPS */
3795 return 0;
3796}
3797
99bbc707
WB
3798#ifdef CONFIG_NET_FLOW_LIMIT
3799int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3800#endif
3801
3802static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3803{
3804#ifdef CONFIG_NET_FLOW_LIMIT
3805 struct sd_flow_limit *fl;
3806 struct softnet_data *sd;
3807 unsigned int old_flow, new_flow;
3808
3809 if (qlen < (netdev_max_backlog >> 1))
3810 return false;
3811
903ceff7 3812 sd = this_cpu_ptr(&softnet_data);
99bbc707
WB
3813
3814 rcu_read_lock();
3815 fl = rcu_dereference(sd->flow_limit);
3816 if (fl) {
3958afa1 3817 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
3818 old_flow = fl->history[fl->history_head];
3819 fl->history[fl->history_head] = new_flow;
3820
3821 fl->history_head++;
3822 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3823
3824 if (likely(fl->buckets[old_flow]))
3825 fl->buckets[old_flow]--;
3826
3827 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3828 fl->count++;
3829 rcu_read_unlock();
3830 return true;
3831 }
3832 }
3833 rcu_read_unlock();
3834#endif
3835 return false;
3836}
3837
0a9627f2
TH
3838/*
3839 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3840 * queue (may be a remote CPU queue).
3841 */
fec5e652
TH
3842static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3843 unsigned int *qtail)
0a9627f2 3844{
e36fa2f7 3845 struct softnet_data *sd;
0a9627f2 3846 unsigned long flags;
99bbc707 3847 unsigned int qlen;
0a9627f2 3848
e36fa2f7 3849 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
3850
3851 local_irq_save(flags);
0a9627f2 3852
e36fa2f7 3853 rps_lock(sd);
e9e4dd32
JA
3854 if (!netif_running(skb->dev))
3855 goto drop;
99bbc707
WB
3856 qlen = skb_queue_len(&sd->input_pkt_queue);
3857 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 3858 if (qlen) {
0a9627f2 3859enqueue:
e36fa2f7 3860 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 3861 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 3862 rps_unlock(sd);
152102c7 3863 local_irq_restore(flags);
0a9627f2
TH
3864 return NET_RX_SUCCESS;
3865 }
3866
ebda37c2
ED
 3867 /* Schedule NAPI for the backlog device.
 3868 * We can use a non-atomic operation since we own the queue lock.
3869 */
3870 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 3871 if (!rps_ipi_queued(sd))
eecfd7c4 3872 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
3873 }
3874 goto enqueue;
3875 }
3876
e9e4dd32 3877drop:
dee42870 3878 sd->dropped++;
e36fa2f7 3879 rps_unlock(sd);
0a9627f2 3880
0a9627f2
TH
3881 local_irq_restore(flags);
3882
caf586e5 3883 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
3884 kfree_skb(skb);
3885 return NET_RX_DROP;
3886}
1da177e4 3887
d4455169
JF
3888static u32 netif_receive_generic_xdp(struct sk_buff *skb,
3889 struct bpf_prog *xdp_prog)
3890{
de8f3a83 3891 u32 metalen, act = XDP_DROP;
d4455169 3892 struct xdp_buff xdp;
d4455169
JF
3893 void *orig_data;
3894 int hlen, off;
3895 u32 mac_len;
3896
3897 /* Reinjected packets coming from act_mirred or similar should
3898 * not get XDP generic processing.
3899 */
3900 if (skb_cloned(skb))
3901 return XDP_PASS;
3902
de8f3a83
DB
3903 /* XDP packets must be linear and must have sufficient headroom
 3904 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that native
 3905 * XDP also provides, so we need to uphold it here as well.
3906 */
3907 if (skb_is_nonlinear(skb) ||
3908 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
3909 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
3910 int troom = skb->tail + skb->data_len - skb->end;
3911
3912 /* In case we have to go down the path and also linearize,
 3913 * then let's do the pskb_expand_head() work just once here.
3914 */
3915 if (pskb_expand_head(skb,
3916 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
3917 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
3918 goto do_drop;
2d17d8d7 3919 if (skb_linearize(skb))
de8f3a83
DB
3920 goto do_drop;
3921 }
d4455169
JF
3922
3923 /* The XDP program wants to see the packet starting at the MAC
3924 * header.
3925 */
3926 mac_len = skb->data - skb_mac_header(skb);
3927 hlen = skb_headlen(skb) + mac_len;
3928 xdp.data = skb->data - mac_len;
de8f3a83 3929 xdp.data_meta = xdp.data;
d4455169
JF
3930 xdp.data_end = xdp.data + hlen;
3931 xdp.data_hard_start = skb->data - skb_headroom(skb);
3932 orig_data = xdp.data;
3933
3934 act = bpf_prog_run_xdp(xdp_prog, &xdp);
3935
3936 off = xdp.data - orig_data;
3937 if (off > 0)
3938 __skb_pull(skb, off);
3939 else if (off < 0)
3940 __skb_push(skb, -off);
92dd5452 3941 skb->mac_header += off;
d4455169
JF
3942
3943 switch (act) {
6103aa96 3944 case XDP_REDIRECT:
d4455169
JF
3945 case XDP_TX:
3946 __skb_push(skb, mac_len);
de8f3a83 3947 break;
d4455169 3948 case XDP_PASS:
de8f3a83
DB
3949 metalen = xdp.data - xdp.data_meta;
3950 if (metalen)
3951 skb_metadata_set(skb, metalen);
d4455169 3952 break;
d4455169
JF
3953 default:
3954 bpf_warn_invalid_xdp_action(act);
3955 /* fall through */
3956 case XDP_ABORTED:
3957 trace_xdp_exception(skb->dev, xdp_prog, act);
3958 /* fall through */
3959 case XDP_DROP:
3960 do_drop:
3961 kfree_skb(skb);
3962 break;
3963 }
3964
3965 return act;
3966}
3967
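/* Editor's note: the program run by netif_receive_generic_xdp() above
 * is ordinary XDP BPF, built separately (e.g. clang -target bpf). A
 * minimal sketch, only to make the act codes in the switch concrete:
 *
 *	SEC("xdp")
 *	int xdp_pass_all(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;	// flows through the XDP_PASS case
 *	}
 */
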
3968/* When doing generic XDP we have to bypass the qdisc layer and the
3969 * network taps in order to match in-driver-XDP behavior.
3970 */
7c497478 3971void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
d4455169
JF
3972{
3973 struct net_device *dev = skb->dev;
3974 struct netdev_queue *txq;
3975 bool free_skb = true;
3976 int cpu, rc;
3977
3978 txq = netdev_pick_tx(dev, skb, NULL);
3979 cpu = smp_processor_id();
3980 HARD_TX_LOCK(dev, txq, cpu);
3981 if (!netif_xmit_stopped(txq)) {
3982 rc = netdev_start_xmit(skb, dev, txq, 0);
3983 if (dev_xmit_complete(rc))
3984 free_skb = false;
3985 }
3986 HARD_TX_UNLOCK(dev, txq);
3987 if (free_skb) {
3988 trace_xdp_exception(dev, xdp_prog, XDP_TX);
3989 kfree_skb(skb);
3990 }
3991}
7c497478 3992EXPORT_SYMBOL_GPL(generic_xdp_tx);
d4455169
JF
3993
3994static struct static_key generic_xdp_needed __read_mostly;
3995
7c497478 3996int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
d4455169 3997{
d4455169
JF
3998 if (xdp_prog) {
3999 u32 act = netif_receive_generic_xdp(skb, xdp_prog);
6103aa96 4000 int err;
d4455169
JF
4001
4002 if (act != XDP_PASS) {
6103aa96
JF
4003 switch (act) {
4004 case XDP_REDIRECT:
2facaad6
JDB
4005 err = xdp_do_generic_redirect(skb->dev, skb,
4006 xdp_prog);
6103aa96
JF
4007 if (err)
4008 goto out_redir;
4009 /* fallthru to submit skb */
4010 case XDP_TX:
d4455169 4011 generic_xdp_tx(skb, xdp_prog);
6103aa96
JF
4012 break;
4013 }
d4455169
JF
4014 return XDP_DROP;
4015 }
4016 }
4017 return XDP_PASS;
6103aa96 4018out_redir:
6103aa96
JF
4019 kfree_skb(skb);
4020 return XDP_DROP;
d4455169 4021}
7c497478 4022EXPORT_SYMBOL_GPL(do_xdp_generic);
d4455169 4023
ae78dbfa 4024static int netif_rx_internal(struct sk_buff *skb)
1da177e4 4025{
b0e28f1e 4026 int ret;
1da177e4 4027
588f0330 4028 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 4029
cf66ba58 4030 trace_netif_rx(skb);
d4455169
JF
4031
4032 if (static_key_false(&generic_xdp_needed)) {
bbbe211c
JF
4033 int ret;
4034
4035 preempt_disable();
4036 rcu_read_lock();
4037 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4038 rcu_read_unlock();
4039 preempt_enable();
d4455169 4040
6103aa96
JF
4041 /* Consider XDP consuming the packet a success from
 4042 * the netdev point of view; we do not want to count
4043 * this as an error.
4044 */
d4455169 4045 if (ret != XDP_PASS)
6103aa96 4046 return NET_RX_SUCCESS;
d4455169
JF
4047 }
4048
df334545 4049#ifdef CONFIG_RPS
c5905afb 4050 if (static_key_false(&rps_needed)) {
fec5e652 4051 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
4052 int cpu;
4053
cece1945 4054 preempt_disable();
b0e28f1e 4055 rcu_read_lock();
fec5e652
TH
4056
4057 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
4058 if (cpu < 0)
4059 cpu = smp_processor_id();
fec5e652
TH
4060
4061 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4062
b0e28f1e 4063 rcu_read_unlock();
cece1945 4064 preempt_enable();
adc9300e
ED
4065 } else
4066#endif
fec5e652
TH
4067 {
4068 unsigned int qtail;
f4563a75 4069
fec5e652
TH
4070 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4071 put_cpu();
4072 }
b0e28f1e 4073 return ret;
1da177e4 4074}
ae78dbfa
BH
4075
4076/**
4077 * netif_rx - post buffer to the network code
4078 * @skb: buffer to post
4079 *
4080 * This function receives a packet from a device driver and queues it for
4081 * the upper (protocol) levels to process. It always succeeds. The buffer
4082 * may be dropped during processing for congestion control or by the
4083 * protocol layers.
4084 *
4085 * return values:
4086 * NET_RX_SUCCESS (no congestion)
4087 * NET_RX_DROP (packet was dropped)
4088 *
4089 */
4090
4091int netif_rx(struct sk_buff *skb)
4092{
4093 trace_netif_rx_entry(skb);
4094
4095 return netif_rx_internal(skb);
4096}
d1b19dff 4097EXPORT_SYMBOL(netif_rx);
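
/* Editor's sketch of the classic caller described above: a driver
 * receive interrupt posting one frame. The example_* names are
 * hypothetical; eth_type_trans() and netif_rx() are the real APIs.
 */
static irqreturn_t example_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sk_buff *skb = example_pull_rx_frame(dev); /* hypothetical */

	if (skb) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);	/* queue for upper layers; may be dropped */
	}
	return IRQ_HANDLED;
}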
1da177e4
LT
4098
4099int netif_rx_ni(struct sk_buff *skb)
4100{
4101 int err;
4102
ae78dbfa
BH
4103 trace_netif_rx_ni_entry(skb);
4104
1da177e4 4105 preempt_disable();
ae78dbfa 4106 err = netif_rx_internal(skb);
1da177e4
LT
4107 if (local_softirq_pending())
4108 do_softirq();
4109 preempt_enable();
4110
4111 return err;
4112}
1da177e4
LT
4113EXPORT_SYMBOL(netif_rx_ni);
4114
0766f788 4115static __latent_entropy void net_tx_action(struct softirq_action *h)
1da177e4 4116{
903ceff7 4117 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1da177e4
LT
4118
4119 if (sd->completion_queue) {
4120 struct sk_buff *clist;
4121
4122 local_irq_disable();
4123 clist = sd->completion_queue;
4124 sd->completion_queue = NULL;
4125 local_irq_enable();
4126
4127 while (clist) {
4128 struct sk_buff *skb = clist;
f4563a75 4129
1da177e4
LT
4130 clist = clist->next;
4131
63354797 4132 WARN_ON(refcount_read(&skb->users));
e6247027
ED
4133 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4134 trace_consume_skb(skb);
4135 else
4136 trace_kfree_skb(skb, net_tx_action);
15fad714
JDB
4137
4138 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4139 __kfree_skb(skb);
4140 else
4141 __kfree_skb_defer(skb);
1da177e4 4142 }
15fad714
JDB
4143
4144 __kfree_skb_flush();
1da177e4
LT
4145 }
4146
4147 if (sd->output_queue) {
37437bb2 4148 struct Qdisc *head;
1da177e4
LT
4149
4150 local_irq_disable();
4151 head = sd->output_queue;
4152 sd->output_queue = NULL;
a9cbd588 4153 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
4154 local_irq_enable();
4155
4156 while (head) {
37437bb2
DM
4157 struct Qdisc *q = head;
4158 spinlock_t *root_lock;
4159
1da177e4
LT
4160 head = head->next_sched;
4161
5fb66229 4162 root_lock = qdisc_lock(q);
3bcb846c
ED
4163 spin_lock(root_lock);
4164 /* We need to make sure head->next_sched is read
4165 * before clearing __QDISC_STATE_SCHED
4166 */
4167 smp_mb__before_atomic();
4168 clear_bit(__QDISC_STATE_SCHED, &q->state);
4169 qdisc_run(q);
4170 spin_unlock(root_lock);
1da177e4
LT
4171 }
4172 }
4173}
4174
181402a5 4175#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
da678292
MM
4176/* This hook is defined here for ATM LANE */
4177int (*br_fdb_test_addr_hook)(struct net_device *dev,
4178 unsigned char *addr) __read_mostly;
4fb019a0 4179EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 4180#endif
1da177e4 4181
1f211a1b
DB
4182static inline struct sk_buff *
4183sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4184 struct net_device *orig_dev)
f697c3e8 4185{
e7582bab 4186#ifdef CONFIG_NET_CLS_ACT
46209401 4187 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
d2788d34 4188 struct tcf_result cl_res;
24824a09 4189
c9e99fd0
DB
4190 /* If there's at least one ingress present somewhere (so
4191 * we get here via enabled static key), remaining devices
4192 * that are not configured with an ingress qdisc will bail
d2788d34 4193 * out here.
c9e99fd0 4194 */
46209401 4195 if (!miniq)
4577139b 4196 return skb;
46209401 4197
f697c3e8
HX
4198 if (*pt_prev) {
4199 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4200 *pt_prev = NULL;
1da177e4
LT
4201 }
4202
3365495c 4203 qdisc_skb_cb(skb)->pkt_len = skb->len;
8dc07fdb 4204 skb->tc_at_ingress = 1;
46209401 4205 mini_qdisc_bstats_cpu_update(miniq, skb);
c9e99fd0 4206
46209401 4207 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
d2788d34
DB
4208 case TC_ACT_OK:
4209 case TC_ACT_RECLASSIFY:
4210 skb->tc_index = TC_H_MIN(cl_res.classid);
4211 break;
4212 case TC_ACT_SHOT:
46209401 4213 mini_qdisc_qstats_cpu_drop(miniq);
8a3a4c6e
ED
4214 kfree_skb(skb);
4215 return NULL;
d2788d34
DB
4216 case TC_ACT_STOLEN:
4217 case TC_ACT_QUEUED:
e25ea21f 4218 case TC_ACT_TRAP:
8a3a4c6e 4219 consume_skb(skb);
d2788d34 4220 return NULL;
27b29f63
AS
4221 case TC_ACT_REDIRECT:
4222 /* skb_mac_header check was done by cls/act_bpf, so
4223 * we can safely push the L2 header back before
4224 * redirecting to another netdev
4225 */
4226 __skb_push(skb, skb->mac_len);
4227 skb_do_redirect(skb);
4228 return NULL;
d2788d34
DB
4229 default:
4230 break;
f697c3e8 4231 }
e7582bab 4232#endif /* CONFIG_NET_CLS_ACT */
e687ad60
PN
4233 return skb;
4234}
1da177e4 4235
24b27fc4
MB
4236/**
4237 * netdev_is_rx_handler_busy - check if receive handler is registered
4238 * @dev: device to check
4239 *
4240 * Check if a receive handler is already registered for a given device.
 4241 * Return true if there is one.
4242 *
4243 * The caller must hold the rtnl_mutex.
4244 */
4245bool netdev_is_rx_handler_busy(struct net_device *dev)
4246{
4247 ASSERT_RTNL();
4248 return dev && rtnl_dereference(dev->rx_handler);
4249}
4250EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4251
ab95bfe0
JP
4252/**
4253 * netdev_rx_handler_register - register receive handler
4254 * @dev: device to register a handler for
4255 * @rx_handler: receive handler to register
93e2c32b 4256 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0 4257 *
e227867f 4258 * Register a receive handler for a device. This handler will then be
ab95bfe0
JP
4259 * called from __netif_receive_skb. A negative errno code is returned
4260 * on a failure.
4261 *
4262 * The caller must hold the rtnl_mutex.
8a4eb573
JP
4263 *
4264 * For a general description of rx_handler, see enum rx_handler_result.
ab95bfe0
JP
4265 */
4266int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
4267 rx_handler_func_t *rx_handler,
4268 void *rx_handler_data)
ab95bfe0 4269{
1b7cd004 4270 if (netdev_is_rx_handler_busy(dev))
ab95bfe0
JP
4271 return -EBUSY;
4272
00cfec37 4273 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 4274 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
4275 rcu_assign_pointer(dev->rx_handler, rx_handler);
4276
4277 return 0;
4278}
4279EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
4280
4281/**
4282 * netdev_rx_handler_unregister - unregister receive handler
4283 * @dev: device to unregister a handler from
4284 *
166ec369 4285 * Unregister a receive handler from a device.
ab95bfe0
JP
4286 *
4287 * The caller must hold the rtnl_mutex.
4288 */
4289void netdev_rx_handler_unregister(struct net_device *dev)
4290{
4291
4292 ASSERT_RTNL();
a9b3cd7f 4293 RCU_INIT_POINTER(dev->rx_handler, NULL);
00cfec37
ED
4294 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
 4295 * section is guaranteed to see a non NULL rx_handler_data
4296 * as well.
4297 */
4298 synchronize_net();
a9b3cd7f 4299 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
ab95bfe0
JP
4300}
4301EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
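
/* Editor's sketch of a minimal user of the two functions above; the
 * example_* names are hypothetical. Per the comments, registration must
 * run under rtnl_lock, and the handler follows enum rx_handler_result.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;	/* we ate the packet */
	}
	return RX_HANDLER_PASS;			/* normal delivery */
}

static int example_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_rx_handler, priv);
	rtnl_unlock();
	return err;
}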
4302
b4b9e355
MG
4303/*
4304 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4305 * the special handling of PFMEMALLOC skbs.
4306 */
4307static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4308{
4309 switch (skb->protocol) {
2b8837ae
JP
4310 case htons(ETH_P_ARP):
4311 case htons(ETH_P_IP):
4312 case htons(ETH_P_IPV6):
4313 case htons(ETH_P_8021Q):
4314 case htons(ETH_P_8021AD):
b4b9e355
MG
4315 return true;
4316 default:
4317 return false;
4318 }
4319}
4320
e687ad60
PN
4321static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4322 int *ret, struct net_device *orig_dev)
4323{
e7582bab 4324#ifdef CONFIG_NETFILTER_INGRESS
e687ad60 4325 if (nf_hook_ingress_active(skb)) {
2c1e2703
AC
4326 int ingress_retval;
4327
e687ad60
PN
4328 if (*pt_prev) {
4329 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4330 *pt_prev = NULL;
4331 }
4332
2c1e2703
AC
4333 rcu_read_lock();
4334 ingress_retval = nf_hook_ingress(skb);
4335 rcu_read_unlock();
4336 return ingress_retval;
e687ad60 4337 }
e7582bab 4338#endif /* CONFIG_NETFILTER_INGRESS */
e687ad60
PN
4339 return 0;
4340}
e687ad60 4341
9754e293 4342static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
1da177e4
LT
4343{
4344 struct packet_type *ptype, *pt_prev;
ab95bfe0 4345 rx_handler_func_t *rx_handler;
f2ccd8fa 4346 struct net_device *orig_dev;
8a4eb573 4347 bool deliver_exact = false;
1da177e4 4348 int ret = NET_RX_DROP;
252e3346 4349 __be16 type;
1da177e4 4350
588f0330 4351 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 4352
cf66ba58 4353 trace_netif_receive_skb(skb);
9b22ea56 4354
cc9bd5ce 4355 orig_dev = skb->dev;
8f903c70 4356
c1d2bbe1 4357 skb_reset_network_header(skb);
fda55eca
ED
4358 if (!skb_transport_header_was_set(skb))
4359 skb_reset_transport_header(skb);
0b5c9db1 4360 skb_reset_mac_len(skb);
1da177e4
LT
4361
4362 pt_prev = NULL;
4363
63d8ea7f 4364another_round:
b6858177 4365 skb->skb_iif = skb->dev->ifindex;
63d8ea7f
DM
4366
4367 __this_cpu_inc(softnet_data.processed);
4368
8ad227ff
PM
4369 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4370 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
0d5501c1 4371 skb = skb_vlan_untag(skb);
bcc6d479 4372 if (unlikely(!skb))
2c17d27c 4373 goto out;
bcc6d479
JP
4374 }
4375
e7246e12
WB
4376 if (skb_skip_tc_classify(skb))
4377 goto skip_classify;
1da177e4 4378
9754e293 4379 if (pfmemalloc)
b4b9e355
MG
4380 goto skip_taps;
4381
1da177e4 4382 list_for_each_entry_rcu(ptype, &ptype_all, list) {
7866a621
SN
4383 if (pt_prev)
4384 ret = deliver_skb(skb, pt_prev, orig_dev);
4385 pt_prev = ptype;
4386 }
4387
4388 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4389 if (pt_prev)
4390 ret = deliver_skb(skb, pt_prev, orig_dev);
4391 pt_prev = ptype;
1da177e4
LT
4392 }
4393
b4b9e355 4394skip_taps:
1cf51900 4395#ifdef CONFIG_NET_INGRESS
4577139b 4396 if (static_key_false(&ingress_needed)) {
1f211a1b 4397 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4577139b 4398 if (!skb)
2c17d27c 4399 goto out;
e687ad60
PN
4400
4401 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
2c17d27c 4402 goto out;
4577139b 4403 }
1cf51900 4404#endif
a5135bcf 4405 skb_reset_tc(skb);
e7246e12 4406skip_classify:
9754e293 4407 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
4408 goto drop;
4409
df8a39de 4410 if (skb_vlan_tag_present(skb)) {
2425717b
JF
4411 if (pt_prev) {
4412 ret = deliver_skb(skb, pt_prev, orig_dev);
4413 pt_prev = NULL;
4414 }
48cc32d3 4415 if (vlan_do_receive(&skb))
2425717b
JF
4416 goto another_round;
4417 else if (unlikely(!skb))
2c17d27c 4418 goto out;
2425717b
JF
4419 }
4420
48cc32d3 4421 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
4422 if (rx_handler) {
4423 if (pt_prev) {
4424 ret = deliver_skb(skb, pt_prev, orig_dev);
4425 pt_prev = NULL;
4426 }
8a4eb573
JP
4427 switch (rx_handler(&skb)) {
4428 case RX_HANDLER_CONSUMED:
3bc1b1ad 4429 ret = NET_RX_SUCCESS;
2c17d27c 4430 goto out;
8a4eb573 4431 case RX_HANDLER_ANOTHER:
63d8ea7f 4432 goto another_round;
8a4eb573
JP
4433 case RX_HANDLER_EXACT:
4434 deliver_exact = true;
4435 case RX_HANDLER_PASS:
4436 break;
4437 default:
4438 BUG();
4439 }
ab95bfe0 4440 }
1da177e4 4441
df8a39de
JP
4442 if (unlikely(skb_vlan_tag_present(skb))) {
4443 if (skb_vlan_tag_get_id(skb))
d4b812de
ED
4444 skb->pkt_type = PACKET_OTHERHOST;
4445 /* Note: we might in the future use prio bits
4446 * and set skb->priority like in vlan_do_receive()
4447 * For the time being, just ignore Priority Code Point
4448 */
4449 skb->vlan_tci = 0;
4450 }
48cc32d3 4451
7866a621
SN
4452 type = skb->protocol;
4453
63d8ea7f 4454 /* deliver only exact match when indicated */
7866a621
SN
4455 if (likely(!deliver_exact)) {
4456 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4457 &ptype_base[ntohs(type) &
4458 PTYPE_HASH_MASK]);
4459 }
1f3c8804 4460
7866a621
SN
4461 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4462 &orig_dev->ptype_specific);
4463
4464 if (unlikely(skb->dev != orig_dev)) {
4465 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4466 &skb->dev->ptype_specific);
1da177e4
LT
4467 }
4468
4469 if (pt_prev) {
1f8b977a 4470 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
0e698bf6 4471 goto drop;
1080e512
MT
4472 else
4473 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 4474 } else {
b4b9e355 4475drop:
6e7333d3
JW
4476 if (!deliver_exact)
4477 atomic_long_inc(&skb->dev->rx_dropped);
4478 else
4479 atomic_long_inc(&skb->dev->rx_nohandler);
1da177e4
LT
4480 kfree_skb(skb);
 4481 /* Jamal, now you will not be able to escape explaining
 4482 * to me how you were going to use this. :-)
4483 */
4484 ret = NET_RX_DROP;
4485 }
4486
2c17d27c 4487out:
9754e293
DM
4488 return ret;
4489}
4490
1c601d82
JDB
4491/**
4492 * netif_receive_skb_core - special purpose version of netif_receive_skb
4493 * @skb: buffer to process
4494 *
4495 * More direct receive version of netif_receive_skb(). It should
4496 * only be used by callers that have a need to skip RPS and Generic XDP.
4497 * Caller must also take care of handling if (page_is_)pfmemalloc.
4498 *
4499 * This function may only be called from softirq context and interrupts
4500 * should be enabled.
4501 *
4502 * Return values (usually ignored):
4503 * NET_RX_SUCCESS: no congestion
4504 * NET_RX_DROP: packet was dropped
4505 */
4506int netif_receive_skb_core(struct sk_buff *skb)
4507{
4508 int ret;
4509
4510 rcu_read_lock();
4511 ret = __netif_receive_skb_core(skb, false);
4512 rcu_read_unlock();
4513
4514 return ret;
4515}
4516EXPORT_SYMBOL(netif_receive_skb_core);
4517
9754e293
DM
4518static int __netif_receive_skb(struct sk_buff *skb)
4519{
4520 int ret;
4521
4522 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
f1083048 4523 unsigned int noreclaim_flag;
9754e293
DM
4524
4525 /*
4526 * PFMEMALLOC skbs are special, they should
4527 * - be delivered to SOCK_MEMALLOC sockets only
4528 * - stay away from userspace
4529 * - have bounded memory usage
4530 *
4531 * Use PF_MEMALLOC as this saves us from propagating the allocation
4532 * context down to all allocation sites.
4533 */
f1083048 4534 noreclaim_flag = memalloc_noreclaim_save();
9754e293 4535 ret = __netif_receive_skb_core(skb, true);
f1083048 4536 memalloc_noreclaim_restore(noreclaim_flag);
9754e293
DM
4537 } else
4538 ret = __netif_receive_skb_core(skb, false);
4539
1da177e4
LT
4540 return ret;
4541}
0a9627f2 4542
f4e63525 4543static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
b5cdae32 4544{
58038695 4545 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
b5cdae32
DM
4546 struct bpf_prog *new = xdp->prog;
4547 int ret = 0;
4548
4549 switch (xdp->command) {
58038695 4550 case XDP_SETUP_PROG:
b5cdae32
DM
4551 rcu_assign_pointer(dev->xdp_prog, new);
4552 if (old)
4553 bpf_prog_put(old);
4554
4555 if (old && !new) {
4556 static_key_slow_dec(&generic_xdp_needed);
4557 } else if (new && !old) {
4558 static_key_slow_inc(&generic_xdp_needed);
4559 dev_disable_lro(dev);
4560 }
4561 break;
b5cdae32
DM
4562
4563 case XDP_QUERY_PROG:
58038695
MKL
4564 xdp->prog_attached = !!old;
4565 xdp->prog_id = old ? old->aux->id : 0;
b5cdae32
DM
4566 break;
4567
4568 default:
4569 ret = -EINVAL;
4570 break;
4571 }
4572
4573 return ret;
4574}
4575
ae78dbfa 4576static int netif_receive_skb_internal(struct sk_buff *skb)
0a9627f2 4577{
2c17d27c
JA
4578 int ret;
4579
588f0330 4580 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 4581
c1f19b51
RC
4582 if (skb_defer_rx_timestamp(skb))
4583 return NET_RX_SUCCESS;
4584
b5cdae32 4585 if (static_key_false(&generic_xdp_needed)) {
bbbe211c 4586 int ret;
b5cdae32 4587
bbbe211c
JF
4588 preempt_disable();
4589 rcu_read_lock();
4590 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4591 rcu_read_unlock();
4592 preempt_enable();
4593
4594 if (ret != XDP_PASS)
d4455169 4595 return NET_RX_DROP;
b5cdae32
DM
4596 }
4597
bbbe211c 4598 rcu_read_lock();
df334545 4599#ifdef CONFIG_RPS
c5905afb 4600 if (static_key_false(&rps_needed)) {
3b098e2d 4601 struct rps_dev_flow voidflow, *rflow = &voidflow;
2c17d27c 4602 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 4603
3b098e2d
ED
4604 if (cpu >= 0) {
4605 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4606 rcu_read_unlock();
adc9300e 4607 return ret;
3b098e2d 4608 }
fec5e652 4609 }
1e94d72f 4610#endif
2c17d27c
JA
4611 ret = __netif_receive_skb(skb);
4612 rcu_read_unlock();
4613 return ret;
0a9627f2 4614}
ae78dbfa
BH
4615
4616/**
4617 * netif_receive_skb - process receive buffer from network
4618 * @skb: buffer to process
4619 *
4620 * netif_receive_skb() is the main receive data processing function.
4621 * It always succeeds. The buffer may be dropped during processing
4622 * for congestion control or by the protocol layers.
4623 *
4624 * This function may only be called from softirq context and interrupts
4625 * should be enabled.
4626 *
4627 * Return values (usually ignored):
4628 * NET_RX_SUCCESS: no congestion
4629 * NET_RX_DROP: packet was dropped
4630 */
04eb4489 4631int netif_receive_skb(struct sk_buff *skb)
ae78dbfa
BH
4632{
4633 trace_netif_receive_skb_entry(skb);
4634
4635 return netif_receive_skb_internal(skb);
4636}
04eb4489 4637EXPORT_SYMBOL(netif_receive_skb);
1da177e4 4638
41852497 4639DEFINE_PER_CPU(struct work_struct, flush_works);
145dd5f9
PA
4640
4641/* Network device is going away, flush any packets still pending */
4642static void flush_backlog(struct work_struct *work)
6e583ce5 4643{
6e583ce5 4644 struct sk_buff *skb, *tmp;
145dd5f9
PA
4645 struct softnet_data *sd;
4646
4647 local_bh_disable();
4648 sd = this_cpu_ptr(&softnet_data);
6e583ce5 4649
145dd5f9 4650 local_irq_disable();
e36fa2f7 4651 rps_lock(sd);
6e7676c1 4652 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
41852497 4653 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
e36fa2f7 4654 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 4655 kfree_skb(skb);
76cc8b13 4656 input_queue_head_incr(sd);
6e583ce5 4657 }
6e7676c1 4658 }
e36fa2f7 4659 rps_unlock(sd);
145dd5f9 4660 local_irq_enable();
6e7676c1
CG
4661
4662 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
41852497 4663 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
6e7676c1
CG
4664 __skb_unlink(skb, &sd->process_queue);
4665 kfree_skb(skb);
76cc8b13 4666 input_queue_head_incr(sd);
6e7676c1
CG
4667 }
4668 }
145dd5f9
PA
4669 local_bh_enable();
4670}
4671
41852497 4672static void flush_all_backlogs(void)
145dd5f9
PA
4673{
4674 unsigned int cpu;
4675
4676 get_online_cpus();
4677
41852497
ED
4678 for_each_online_cpu(cpu)
4679 queue_work_on(cpu, system_highpri_wq,
4680 per_cpu_ptr(&flush_works, cpu));
145dd5f9
PA
4681
4682 for_each_online_cpu(cpu)
41852497 4683 flush_work(per_cpu_ptr(&flush_works, cpu));
145dd5f9
PA
4684
4685 put_online_cpus();
6e583ce5
SH
4686}
4687
d565b0a1
HX
4688static int napi_gro_complete(struct sk_buff *skb)
4689{
22061d80 4690 struct packet_offload *ptype;
d565b0a1 4691 __be16 type = skb->protocol;
22061d80 4692 struct list_head *head = &offload_base;
d565b0a1
HX
4693 int err = -ENOENT;
4694
c3c7c254
ED
4695 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4696
fc59f9a3
HX
4697 if (NAPI_GRO_CB(skb)->count == 1) {
4698 skb_shinfo(skb)->gso_size = 0;
d565b0a1 4699 goto out;
fc59f9a3 4700 }
d565b0a1
HX
4701
4702 rcu_read_lock();
4703 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4704 if (ptype->type != type || !ptype->callbacks.gro_complete)
d565b0a1
HX
4705 continue;
4706
299603e8 4707 err = ptype->callbacks.gro_complete(skb, 0);
d565b0a1
HX
4708 break;
4709 }
4710 rcu_read_unlock();
4711
4712 if (err) {
4713 WARN_ON(&ptype->list == head);
4714 kfree_skb(skb);
4715 return NET_RX_SUCCESS;
4716 }
4717
4718out:
ae78dbfa 4719 return netif_receive_skb_internal(skb);
d565b0a1
HX
4720}
4721
2e71a6f8
ED
 4722/* napi->gro_list contains packets ordered by age, with the
 4723 * youngest packets at its head.
4724 * Complete skbs in reverse order to reduce latencies.
4725 */
4726void napi_gro_flush(struct napi_struct *napi, bool flush_old)
d565b0a1 4727{
2e71a6f8 4728 struct sk_buff *skb, *prev = NULL;
d565b0a1 4729
2e71a6f8
ED
4730 /* scan list and build reverse chain */
4731 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4732 skb->prev = prev;
4733 prev = skb;
4734 }
4735
4736 for (skb = prev; skb; skb = prev) {
d565b0a1 4737 skb->next = NULL;
2e71a6f8
ED
4738
4739 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4740 return;
4741
4742 prev = skb->prev;
d565b0a1 4743 napi_gro_complete(skb);
2e71a6f8 4744 napi->gro_count--;
d565b0a1
HX
4745 }
4746
4747 napi->gro_list = NULL;
4748}
86cac58b 4749EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 4750
89c5fa33
ED
4751static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4752{
4753 struct sk_buff *p;
4754 unsigned int maclen = skb->dev->hard_header_len;
0b4cec8c 4755 u32 hash = skb_get_hash_raw(skb);
89c5fa33
ED
4756
4757 for (p = napi->gro_list; p; p = p->next) {
4758 unsigned long diffs;
4759
0b4cec8c
TH
4760 NAPI_GRO_CB(p)->flush = 0;
4761
4762 if (hash != skb_get_hash_raw(p)) {
4763 NAPI_GRO_CB(p)->same_flow = 0;
4764 continue;
4765 }
4766
89c5fa33
ED
4767 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4768 diffs |= p->vlan_tci ^ skb->vlan_tci;
ce87fc6c 4769 diffs |= skb_metadata_dst_cmp(p, skb);
de8f3a83 4770 diffs |= skb_metadata_differs(p, skb);
89c5fa33
ED
4771 if (maclen == ETH_HLEN)
4772 diffs |= compare_ether_header(skb_mac_header(p),
a50e233c 4773 skb_mac_header(skb));
89c5fa33
ED
4774 else if (!diffs)
4775 diffs = memcmp(skb_mac_header(p),
a50e233c 4776 skb_mac_header(skb),
89c5fa33
ED
4777 maclen);
4778 NAPI_GRO_CB(p)->same_flow = !diffs;
89c5fa33
ED
4779 }
4780}
4781
299603e8
JC
4782static void skb_gro_reset_offset(struct sk_buff *skb)
4783{
4784 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4785 const skb_frag_t *frag0 = &pinfo->frags[0];
4786
4787 NAPI_GRO_CB(skb)->data_offset = 0;
4788 NAPI_GRO_CB(skb)->frag0 = NULL;
4789 NAPI_GRO_CB(skb)->frag0_len = 0;
4790
4791 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4792 pinfo->nr_frags &&
4793 !PageHighMem(skb_frag_page(frag0))) {
4794 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
7cfd5fd5
ED
4795 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4796 skb_frag_size(frag0),
4797 skb->end - skb->tail);
89c5fa33
ED
4798 }
4799}
4800
a50e233c
ED
4801static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4802{
4803 struct skb_shared_info *pinfo = skb_shinfo(skb);
4804
4805 BUG_ON(skb->end - skb->tail < grow);
4806
4807 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4808
4809 skb->data_len -= grow;
4810 skb->tail += grow;
4811
4812 pinfo->frags[0].page_offset += grow;
4813 skb_frag_size_sub(&pinfo->frags[0], grow);
4814
4815 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4816 skb_frag_unref(skb, 0);
4817 memmove(pinfo->frags, pinfo->frags + 1,
4818 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4819 }
4820}
4821
bb728820 4822static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
4823{
4824 struct sk_buff **pp = NULL;
22061d80 4825 struct packet_offload *ptype;
d565b0a1 4826 __be16 type = skb->protocol;
22061d80 4827 struct list_head *head = &offload_base;
0da2afd5 4828 int same_flow;
5b252f0c 4829 enum gro_result ret;
a50e233c 4830 int grow;
d565b0a1 4831
b5cdae32 4832 if (netif_elide_gro(skb->dev))
d565b0a1
HX
4833 goto normal;
4834
89c5fa33
ED
4835 gro_list_prepare(napi, skb);
4836
d565b0a1
HX
4837 rcu_read_lock();
4838 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4839 if (ptype->type != type || !ptype->callbacks.gro_receive)
d565b0a1
HX
4840 continue;
4841
86911732 4842 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 4843 skb_reset_mac_len(skb);
d565b0a1 4844 NAPI_GRO_CB(skb)->same_flow = 0;
d61d072e 4845 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5d38a079 4846 NAPI_GRO_CB(skb)->free = 0;
fac8e0f5 4847 NAPI_GRO_CB(skb)->encap_mark = 0;
fcd91dd4 4848 NAPI_GRO_CB(skb)->recursion_counter = 0;
a0ca153f 4849 NAPI_GRO_CB(skb)->is_fou = 0;
1530545e 4850 NAPI_GRO_CB(skb)->is_atomic = 1;
15e2396d 4851 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
d565b0a1 4852
662880f4
TH
4853 /* Setup for GRO checksum validation */
4854 switch (skb->ip_summed) {
4855 case CHECKSUM_COMPLETE:
4856 NAPI_GRO_CB(skb)->csum = skb->csum;
4857 NAPI_GRO_CB(skb)->csum_valid = 1;
4858 NAPI_GRO_CB(skb)->csum_cnt = 0;
4859 break;
4860 case CHECKSUM_UNNECESSARY:
4861 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4862 NAPI_GRO_CB(skb)->csum_valid = 0;
4863 break;
4864 default:
4865 NAPI_GRO_CB(skb)->csum_cnt = 0;
4866 NAPI_GRO_CB(skb)->csum_valid = 0;
4867 }
d565b0a1 4868
f191a1d1 4869 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
d565b0a1
HX
4870 break;
4871 }
4872 rcu_read_unlock();
4873
4874 if (&ptype->list == head)
4875 goto normal;
4876
25393d3f
SK
4877 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
4878 ret = GRO_CONSUMED;
4879 goto ok;
4880 }
4881
0da2afd5 4882 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 4883 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 4884
d565b0a1
HX
4885 if (pp) {
4886 struct sk_buff *nskb = *pp;
4887
4888 *pp = nskb->next;
4889 nskb->next = NULL;
4890 napi_gro_complete(nskb);
4ae5544f 4891 napi->gro_count--;
d565b0a1
HX
4892 }
4893
0da2afd5 4894 if (same_flow)
d565b0a1
HX
4895 goto ok;
4896
600adc18 4897 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 4898 goto normal;
d565b0a1 4899
600adc18
ED
4900 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4901 struct sk_buff *nskb = napi->gro_list;
4902
4903 /* locate the end of the list to select the 'oldest' flow */
4904 while (nskb->next) {
4905 pp = &nskb->next;
4906 nskb = *pp;
4907 }
4908 *pp = NULL;
4909 nskb->next = NULL;
4910 napi_gro_complete(nskb);
4911 } else {
4912 napi->gro_count++;
4913 }
d565b0a1 4914 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 4915 NAPI_GRO_CB(skb)->age = jiffies;
29e98242 4916 NAPI_GRO_CB(skb)->last = skb;
86911732 4917 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
4918 skb->next = napi->gro_list;
4919 napi->gro_list = skb;
5d0d9be8 4920 ret = GRO_HELD;
d565b0a1 4921
ad0f9904 4922pull:
a50e233c
ED
4923 grow = skb_gro_offset(skb) - skb_headlen(skb);
4924 if (grow > 0)
4925 gro_pull_from_frag0(skb, grow);
d565b0a1 4926ok:
5d0d9be8 4927 return ret;
d565b0a1
HX
4928
4929normal:
ad0f9904
HX
4930 ret = GRO_NORMAL;
4931 goto pull;
5d38a079 4932}
96e93eab 4933
bf5a755f
JC
4934struct packet_offload *gro_find_receive_by_type(__be16 type)
4935{
4936 struct list_head *offload_head = &offload_base;
4937 struct packet_offload *ptype;
4938
4939 list_for_each_entry_rcu(ptype, offload_head, list) {
4940 if (ptype->type != type || !ptype->callbacks.gro_receive)
4941 continue;
4942 return ptype;
4943 }
4944 return NULL;
4945}
e27a2f83 4946EXPORT_SYMBOL(gro_find_receive_by_type);
bf5a755f
JC
4947
4948struct packet_offload *gro_find_complete_by_type(__be16 type)
4949{
4950 struct list_head *offload_head = &offload_base;
4951 struct packet_offload *ptype;
4952
4953 list_for_each_entry_rcu(ptype, offload_head, list) {
4954 if (ptype->type != type || !ptype->callbacks.gro_complete)
4955 continue;
4956 return ptype;
4957 }
4958 return NULL;
4959}
e27a2f83 4960EXPORT_SYMBOL(gro_find_complete_by_type);
5d38a079 4961
e44699d2
MK
4962static void napi_skb_free_stolen_head(struct sk_buff *skb)
4963{
4964 skb_dst_drop(skb);
4965 secpath_reset(skb);
4966 kmem_cache_free(skbuff_head_cache, skb);
4967}
4968
bb728820 4969static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 4970{
5d0d9be8
HX
4971 switch (ret) {
4972 case GRO_NORMAL:
ae78dbfa 4973 if (netif_receive_skb_internal(skb))
c7c4b3b6
BH
4974 ret = GRO_DROP;
4975 break;
5d38a079 4976
5d0d9be8 4977 case GRO_DROP:
5d38a079
HX
4978 kfree_skb(skb);
4979 break;
5b252f0c 4980
daa86548 4981 case GRO_MERGED_FREE:
e44699d2
MK
4982 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4983 napi_skb_free_stolen_head(skb);
4984 else
d7e8883c 4985 __kfree_skb(skb);
daa86548
ED
4986 break;
4987
5b252f0c
BH
4988 case GRO_HELD:
4989 case GRO_MERGED:
25393d3f 4990 case GRO_CONSUMED:
5b252f0c 4991 break;
5d38a079
HX
4992 }
4993
c7c4b3b6 4994 return ret;
5d0d9be8 4995}
5d0d9be8 4996
c7c4b3b6 4997gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 4998{
93f93a44 4999 skb_mark_napi_id(skb, napi);
ae78dbfa 5000 trace_napi_gro_receive_entry(skb);
86911732 5001
a50e233c
ED
5002 skb_gro_reset_offset(skb);
5003
89c5fa33 5004 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
d565b0a1
HX
5005}
5006EXPORT_SYMBOL(napi_gro_receive);
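
/* Editor's sketch: on the NAPI poll path, drivers hand frames to GRO
 * via napi_gro_receive() instead of netif_receive_skb(). The example_*
 * name is hypothetical; the two calls shown are the real APIs.
 */
static void example_deliver_rx(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	napi_gro_receive(napi, skb);	/* may merge, hold or deliver now */
}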
5007
d0c2b0d2 5008static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 5009{
93a35f59
ED
5010 if (unlikely(skb->pfmemalloc)) {
5011 consume_skb(skb);
5012 return;
5013 }
96e93eab 5014 __skb_pull(skb, skb_headlen(skb));
2a2a459e
ED
5015 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
5016 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3701e513 5017 skb->vlan_tci = 0;
66c46d74 5018 skb->dev = napi->dev;
6d152e23 5019 skb->skb_iif = 0;
c3caf119
JC
5020 skb->encapsulation = 0;
5021 skb_shinfo(skb)->gso_type = 0;
e33d0ba8 5022 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
f991bb9d 5023 secpath_reset(skb);
96e93eab
HX
5024
5025 napi->skb = skb;
5026}
96e93eab 5027
76620aaf 5028struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 5029{
5d38a079 5030 struct sk_buff *skb = napi->skb;
5d38a079
HX
5031
5032 if (!skb) {
fd11a83d 5033 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
e2f9dc3b
ED
5034 if (skb) {
5035 napi->skb = skb;
5036 skb_mark_napi_id(skb, napi);
5037 }
80595d59 5038 }
96e93eab
HX
5039 return skb;
5040}
76620aaf 5041EXPORT_SYMBOL(napi_get_frags);
96e93eab 5042
a50e233c
ED
5043static gro_result_t napi_frags_finish(struct napi_struct *napi,
5044 struct sk_buff *skb,
5045 gro_result_t ret)
96e93eab 5046{
5d0d9be8
HX
5047 switch (ret) {
5048 case GRO_NORMAL:
a50e233c
ED
5049 case GRO_HELD:
5050 __skb_push(skb, ETH_HLEN);
5051 skb->protocol = eth_type_trans(skb, skb->dev);
5052 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
c7c4b3b6 5053 ret = GRO_DROP;
86911732 5054 break;
5d38a079 5055
5d0d9be8 5056 case GRO_DROP:
5d0d9be8
HX
5057 napi_reuse_skb(napi, skb);
5058 break;
5b252f0c 5059
e44699d2
MK
5060 case GRO_MERGED_FREE:
5061 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5062 napi_skb_free_stolen_head(skb);
5063 else
5064 napi_reuse_skb(napi, skb);
5065 break;
5066
5b252f0c 5067 case GRO_MERGED:
25393d3f 5068 case GRO_CONSUMED:
5b252f0c 5069 break;
5d0d9be8 5070 }
5d38a079 5071
c7c4b3b6 5072 return ret;
5d38a079 5073}
5d0d9be8 5074
a50e233c
ED
 5075/* The upper GRO stack assumes the network header starts at gro_offset=0.
 5076 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
 5077 * we copy the ethernet header into skb->data to have a common layout.
5078 */
4adb9c4a 5079static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
76620aaf
HX
5080{
5081 struct sk_buff *skb = napi->skb;
a50e233c
ED
5082 const struct ethhdr *eth;
5083 unsigned int hlen = sizeof(*eth);
76620aaf
HX
5084
5085 napi->skb = NULL;
5086
a50e233c
ED
5087 skb_reset_mac_header(skb);
5088 skb_gro_reset_offset(skb);
5089
5090 eth = skb_gro_header_fast(skb, 0);
5091 if (unlikely(skb_gro_header_hard(skb, hlen))) {
5092 eth = skb_gro_header_slow(skb, hlen, 0);
5093 if (unlikely(!eth)) {
4da46ceb
AC
5094 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5095 __func__, napi->dev->name);
a50e233c
ED
5096 napi_reuse_skb(napi, skb);
5097 return NULL;
5098 }
5099 } else {
5100 gro_pull_from_frag0(skb, hlen);
5101 NAPI_GRO_CB(skb)->frag0 += hlen;
5102 NAPI_GRO_CB(skb)->frag0_len -= hlen;
76620aaf 5103 }
a50e233c
ED
5104 __skb_pull(skb, hlen);
5105
5106 /*
5107 * This works because the only protocols we care about don't require
5108 * special handling.
5109 * We'll fix it up properly in napi_frags_finish()
5110 */
5111 skb->protocol = eth->h_proto;
76620aaf 5112
76620aaf
HX
5113 return skb;
5114}
76620aaf 5115
c7c4b3b6 5116gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 5117{
76620aaf 5118 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
5119
5120 if (!skb)
c7c4b3b6 5121 return GRO_DROP;
5d0d9be8 5122
ae78dbfa
BH
5123 trace_napi_gro_frags_entry(skb);
5124
89c5fa33 5125 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5d0d9be8 5126}
5d38a079
HX
5127EXPORT_SYMBOL(napi_gro_frags);
5128
573e8fca
TH
5129/* Compute the checksum from gro_offset and return the folded value
5130 * after adding in any pseudo checksum.
5131 */
5132__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5133{
5134 __wsum wsum;
5135 __sum16 sum;
5136
5137 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5138
5139 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5140 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5141 if (likely(!sum)) {
5142 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5143 !skb->csum_complete_sw)
5144 netdev_rx_csum_fault(skb->dev);
5145 }
5146
5147 NAPI_GRO_CB(skb)->csum = wsum;
5148 NAPI_GRO_CB(skb)->csum_valid = 1;
5149
5150 return sum;
5151}
5152EXPORT_SYMBOL(__skb_gro_checksum_complete);
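
/* Editor's note (assumed, based on the in-tree users): protocol
 * gro_receive handlers normally reach the function above through the
 * skb_gro_checksum_validate() helpers in linux/netdevice.h, roughly as
 * tcp4_gro_receive() does:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */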
5153
773fc8f6 5154static void net_rps_send_ipi(struct softnet_data *remsd)
5155{
5156#ifdef CONFIG_RPS
5157 while (remsd) {
5158 struct softnet_data *next = remsd->rps_ipi_next;
5159
5160 if (cpu_online(remsd->cpu))
5161 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5162 remsd = next;
5163 }
5164#endif
5165}
5166
e326bed2 5167/*
855abcf0 5168 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
e326bed2
ED
5169 * Note: called with local irq disabled, but exits with local irq enabled.
5170 */
5171static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5172{
5173#ifdef CONFIG_RPS
5174 struct softnet_data *remsd = sd->rps_ipi_list;
5175
5176 if (remsd) {
5177 sd->rps_ipi_list = NULL;
5178
5179 local_irq_enable();
5180
5181 /* Send pending IPI's to kick RPS processing on remote cpus. */
773fc8f6 5182 net_rps_send_ipi(remsd);
e326bed2
ED
5183 } else
5184#endif
5185 local_irq_enable();
5186}
5187
d75b1ade
ED
5188static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5189{
5190#ifdef CONFIG_RPS
5191 return sd->rps_ipi_list != NULL;
5192#else
5193 return false;
5194#endif
5195}
5196
bea3348e 5197static int process_backlog(struct napi_struct *napi, int quota)
1da177e4 5198{
eecfd7c4 5199 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
145dd5f9
PA
5200 bool again = true;
5201 int work = 0;
1da177e4 5202
e326bed2
ED
 5203 /* Check if we have pending IPIs; it's better to send them now
 5204 * than to wait for net_rx_action() to end.
5205 */
d75b1ade 5206 if (sd_has_rps_ipi_waiting(sd)) {
e326bed2
ED
5207 local_irq_disable();
5208 net_rps_action_and_irq_enable(sd);
5209 }
d75b1ade 5210
3d48b53f 5211 napi->weight = dev_rx_weight;
145dd5f9 5212 while (again) {
1da177e4 5213 struct sk_buff *skb;
6e7676c1
CG
5214
5215 while ((skb = __skb_dequeue(&sd->process_queue))) {
2c17d27c 5216 rcu_read_lock();
6e7676c1 5217 __netif_receive_skb(skb);
2c17d27c 5218 rcu_read_unlock();
76cc8b13 5219 input_queue_head_incr(sd);
145dd5f9 5220 if (++work >= quota)
76cc8b13 5221 return work;
145dd5f9 5222
6e7676c1 5223 }
1da177e4 5224
145dd5f9 5225 local_irq_disable();
e36fa2f7 5226 rps_lock(sd);
11ef7a89 5227 if (skb_queue_empty(&sd->input_pkt_queue)) {
eecfd7c4
ED
5228 /*
5229 * Inline a custom version of __napi_complete().
 5230 * Only the current cpu owns and manipulates this napi,
11ef7a89
TH
5231 * and NAPI_STATE_SCHED is the only possible flag set
5232 * on backlog.
5233 * We can use a plain write instead of clear_bit(),
eecfd7c4
ED
 5234 * and we don't need an smp_mb() memory barrier.
5235 */
eecfd7c4 5236 napi->state = 0;
145dd5f9
PA
5237 again = false;
5238 } else {
5239 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5240 &sd->process_queue);
bea3348e 5241 }
e36fa2f7 5242 rps_unlock(sd);
145dd5f9 5243 local_irq_enable();
6e7676c1 5244 }
1da177e4 5245
bea3348e
SH
5246 return work;
5247}
1da177e4 5248
bea3348e
SH
5249/**
5250 * __napi_schedule - schedule for receive
c4ea43c5 5251 * @n: entry to schedule
bea3348e 5252 *
bc9ad166
ED
5253 * The entry's receive function will be scheduled to run.
5254 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
bea3348e 5255 */
b5606c2d 5256void __napi_schedule(struct napi_struct *n)
bea3348e
SH
5257{
5258 unsigned long flags;
1da177e4 5259
bea3348e 5260 local_irq_save(flags);
903ceff7 5261 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
bea3348e 5262 local_irq_restore(flags);
1da177e4 5263}
bea3348e
SH
5264EXPORT_SYMBOL(__napi_schedule);
5265
39e6c820
ED
5266/**
5267 * napi_schedule_prep - check if napi can be scheduled
5268 * @n: napi context
5269 *
5270 * Test if NAPI routine is already running, and if not mark
 5271 * it as running. This is used as a condition variable to
 5272 * ensure only one NAPI poll instance runs. We also make
5273 * sure there is no pending NAPI disable.
5274 */
5275bool napi_schedule_prep(struct napi_struct *n)
5276{
5277 unsigned long val, new;
5278
5279 do {
5280 val = READ_ONCE(n->state);
5281 if (unlikely(val & NAPIF_STATE_DISABLE))
5282 return false;
5283 new = val | NAPIF_STATE_SCHED;
5284
5285 /* Sets the STATE_MISSED bit if STATE_SCHED was already set.
5286 * This was suggested by Alexander Duyck, as the compiler
5287 * emits better code than:
5288 * if (val & NAPIF_STATE_SCHED)
5289 * new |= NAPIF_STATE_MISSED;
5290 */
5291 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5292 NAPIF_STATE_MISSED;
5293 } while (cmpxchg(&n->state, val, new) != val);
5294
5295 return !(val & NAPIF_STATE_SCHED);
5296}
5297EXPORT_SYMBOL(napi_schedule_prep);
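
/*
 * Minimal standalone sketch of the branchless idiom used above, with
 * stand-in flag values: (val & DEMO_FLAG) / DEMO_FLAG evaluates to 0 or 1,
 * so multiplying by DEMO_OTHER sets DEMO_OTHER exactly when DEMO_FLAG was
 * already set, without a conditional branch. The macros are illustrative.
 */
#define DEMO_FLAG	0x4UL
#define DEMO_OTHER	0x8UL

static unsigned long demo_propagate_flag(unsigned long val)
{
	/* Equivalent to: if (val & DEMO_FLAG) val |= DEMO_OTHER; */
	return val | ((val & DEMO_FLAG) / DEMO_FLAG * DEMO_OTHER);
}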
5298
bc9ad166
ED
5299/**
5300 * __napi_schedule_irqoff - schedule for receive
5301 * @n: entry to schedule
5302 *
5303 * Variant of __napi_schedule() assuming hard irqs are masked
5304 */
5305void __napi_schedule_irqoff(struct napi_struct *n)
5306{
5307 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5308}
5309EXPORT_SYMBOL(__napi_schedule_irqoff);
5310
364b6055 5311bool napi_complete_done(struct napi_struct *n, int work_done)
d565b0a1 5312{
39e6c820 5313 unsigned long flags, val, new;
d565b0a1
HX
5314
5315 /*
217f6974
ED
5316 * 1) Don't let napi dequeue from the cpu poll list
5317 * just in case it's running on a different cpu.
5318 * 2) If we are busy polling, do nothing here, we have
5319 * the guarantee we will be called later.
d565b0a1 5320 */
217f6974
ED
5321 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5322 NAPIF_STATE_IN_BUSY_POLL)))
364b6055 5323 return false;
d565b0a1 5324
3b47d303
ED
5325 if (n->gro_list) {
5326 unsigned long timeout = 0;
d75b1ade 5327
3b47d303
ED
5328 if (work_done)
5329 timeout = n->dev->gro_flush_timeout;
5330
5331 if (timeout)
5332 hrtimer_start(&n->timer, ns_to_ktime(timeout),
5333 HRTIMER_MODE_REL_PINNED);
5334 else
5335 napi_gro_flush(n, false);
5336 }
02c1602e 5337 if (unlikely(!list_empty(&n->poll_list))) {
d75b1ade
ED
5338 /* If n->poll_list is not empty, we need to mask irqs */
5339 local_irq_save(flags);
02c1602e 5340 list_del_init(&n->poll_list);
d75b1ade
ED
5341 local_irq_restore(flags);
5342 }
39e6c820
ED
5343
5344 do {
5345 val = READ_ONCE(n->state);
5346
5347 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5348
5349 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5350
5351 /* If STATE_MISSED was set, leave STATE_SCHED set,
5352 * because we will call napi->poll() one more time.
5353 * This C code was suggested by Alexander Duyck to help gcc.
5354 */
5355 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5356 NAPIF_STATE_SCHED;
5357 } while (cmpxchg(&n->state, val, new) != val);
5358
5359 if (unlikely(val & NAPIF_STATE_MISSED)) {
5360 __napi_schedule(n);
5361 return false;
5362 }
5363
364b6055 5364 return true;
d565b0a1 5365}
3b47d303 5366EXPORT_SYMBOL(napi_complete_done);
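
/*
 * Illustrative sketch, not part of this file: the canonical shape of a
 * driver ->poll() callback around napi_complete_done(). my_nic_priv,
 * my_nic_clean_rx() and my_nic_enable_rx_irq() are hypothetical helpers.
 */
static int my_nic_poll(struct napi_struct *napi, int budget)
{
	struct my_nic_priv *priv = container_of(napi, struct my_nic_priv,
						napi);
	int work_done = my_nic_clean_rx(priv, budget);

	/* Re-arm device interrupts only once NAPI agrees we are done. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_nic_enable_rx_irq(priv);

	return work_done;
}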
d565b0a1 5367
af12fa6e 5368/* must be called under rcu_read_lock(), as we dont take a reference */
02d62e86 5369static struct napi_struct *napi_by_id(unsigned int napi_id)
af12fa6e
ET
5370{
5371 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5372 struct napi_struct *napi;
5373
5374 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5375 if (napi->napi_id == napi_id)
5376 return napi;
5377
5378 return NULL;
5379}
02d62e86
ED
5380
5381#if defined(CONFIG_NET_RX_BUSY_POLL)
217f6974 5382
ce6aea93 5383#define BUSY_POLL_BUDGET 8
217f6974
ED
5384
5385static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5386{
5387 int rc;
5388
39e6c820
ED
5389 /* Busy polling means there is a high chance the device driver's hard irq
5390 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5391 * set in napi_schedule_prep().
5392 * Since we are about to call napi->poll() once more, we can safely
5393 * clear NAPI_STATE_MISSED.
5394 *
5395 * Note: x86 could use a single "lock and ..." instruction
5396 * to perform these two clear_bit() calls.
5397 */
5398 clear_bit(NAPI_STATE_MISSED, &napi->state);
217f6974
ED
5399 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
5400
5401 local_bh_disable();
5402
5403 /* All we really want here is to re-enable device interrupts.
5404 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5405 */
5406 rc = napi->poll(napi, BUSY_POLL_BUDGET);
1e22391e 5407 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
217f6974
ED
5408 netpoll_poll_unlock(have_poll_lock);
5409 if (rc == BUSY_POLL_BUDGET)
5410 __napi_schedule(napi);
5411 local_bh_enable();
217f6974
ED
5412}
5413
7db6b048
SS
5414void napi_busy_loop(unsigned int napi_id,
5415 bool (*loop_end)(void *, unsigned long),
5416 void *loop_end_arg)
02d62e86 5417{
7db6b048 5418 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
217f6974 5419 int (*napi_poll)(struct napi_struct *napi, int budget);
217f6974 5420 void *have_poll_lock = NULL;
02d62e86 5421 struct napi_struct *napi;
217f6974
ED
5422
5423restart:
217f6974 5424 napi_poll = NULL;
02d62e86 5425
2a028ecb 5426 rcu_read_lock();
02d62e86 5427
545cd5e5 5428 napi = napi_by_id(napi_id);
02d62e86
ED
5429 if (!napi)
5430 goto out;
5431
217f6974
ED
5432 preempt_disable();
5433 for (;;) {
2b5cd0df
AD
5434 int work = 0;
5435
2a028ecb 5436 local_bh_disable();
217f6974
ED
5437 if (!napi_poll) {
5438 unsigned long val = READ_ONCE(napi->state);
5439
5440 /* If multiple threads are competing for this napi,
5441 * we avoid dirtying napi->state as much as we can.
5442 */
5443 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
5444 NAPIF_STATE_IN_BUSY_POLL))
5445 goto count;
5446 if (cmpxchg(&napi->state, val,
5447 val | NAPIF_STATE_IN_BUSY_POLL |
5448 NAPIF_STATE_SCHED) != val)
5449 goto count;
5450 have_poll_lock = netpoll_poll_lock(napi);
5451 napi_poll = napi->poll;
5452 }
2b5cd0df
AD
5453 work = napi_poll(napi, BUSY_POLL_BUDGET);
5454 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
217f6974 5455count:
2b5cd0df 5456 if (work > 0)
7db6b048 5457 __NET_ADD_STATS(dev_net(napi->dev),
2b5cd0df 5458 LINUX_MIB_BUSYPOLLRXPACKETS, work);
2a028ecb 5459 local_bh_enable();
02d62e86 5460
7db6b048 5461 if (!loop_end || loop_end(loop_end_arg, start_time))
217f6974 5462 break;
02d62e86 5463
217f6974
ED
5464 if (unlikely(need_resched())) {
5465 if (napi_poll)
5466 busy_poll_stop(napi, have_poll_lock);
5467 preempt_enable();
5468 rcu_read_unlock();
5469 cond_resched();
7db6b048 5470 if (loop_end(loop_end_arg, start_time))
2b5cd0df 5471 return;
217f6974
ED
5472 goto restart;
5473 }
6cdf89b1 5474 cpu_relax();
217f6974
ED
5475 }
5476 if (napi_poll)
5477 busy_poll_stop(napi, have_poll_lock);
5478 preempt_enable();
02d62e86 5479out:
2a028ecb 5480 rcu_read_unlock();
02d62e86 5481}
7db6b048 5482EXPORT_SYMBOL(napi_busy_loop);
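
/*
 * Illustrative sketch, not part of this file: a loop_end callback for
 * napi_busy_loop() that stops busy polling once an external flag is set,
 * which works regardless of the time base behind start_time. The flag
 * and the wrapper are hypothetical.
 */
static bool my_busy_loop_end(void *arg, unsigned long start_time)
{
	bool *stop = arg;

	return READ_ONCE(*stop);
}

static void my_busy_poll(unsigned int napi_id, bool *stop)
{
	napi_busy_loop(napi_id, my_busy_loop_end, stop);
}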
02d62e86
ED
5483
5484#endif /* CONFIG_NET_RX_BUSY_POLL */
af12fa6e 5485
149d6ad8 5486static void napi_hash_add(struct napi_struct *napi)
af12fa6e 5487{
d64b5e85
ED
5488 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
5489 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
52bd2d62 5490 return;
af12fa6e 5491
52bd2d62 5492 spin_lock(&napi_hash_lock);
af12fa6e 5493
545cd5e5 5494 /* 0..NR_CPUS range is reserved for sender_cpu use */
52bd2d62 5495 do {
545cd5e5
AD
5496 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
5497 napi_gen_id = MIN_NAPI_ID;
52bd2d62
ED
5498 } while (napi_by_id(napi_gen_id));
5499 napi->napi_id = napi_gen_id;
af12fa6e 5500
52bd2d62
ED
5501 hlist_add_head_rcu(&napi->napi_hash_node,
5502 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
af12fa6e 5503
52bd2d62 5504 spin_unlock(&napi_hash_lock);
af12fa6e 5505}
af12fa6e
ET
5506
5507/* Warning: the caller is responsible for making sure an RCU grace period
5508 * is respected before freeing the memory containing @napi.
5509 */
34cbe27e 5510bool napi_hash_del(struct napi_struct *napi)
af12fa6e 5511{
34cbe27e
ED
5512 bool rcu_sync_needed = false;
5513
af12fa6e
ET
5514 spin_lock(&napi_hash_lock);
5515
34cbe27e
ED
5516 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
5517 rcu_sync_needed = true;
af12fa6e 5518 hlist_del_rcu(&napi->napi_hash_node);
34cbe27e 5519 }
af12fa6e 5520 spin_unlock(&napi_hash_lock);
34cbe27e 5521 return rcu_sync_needed;
af12fa6e
ET
5522}
5523EXPORT_SYMBOL_GPL(napi_hash_del);
5524
3b47d303
ED
5525static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
5526{
5527 struct napi_struct *napi;
5528
5529 napi = container_of(timer, struct napi_struct, timer);
39e6c820
ED
5530
5531 /* Note: we use a relaxed variant of napi_schedule_prep(), not setting
5532 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
5533 */
5534 if (napi->gro_list && !napi_disable_pending(napi) &&
5535 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
5536 __napi_schedule_irqoff(napi);
3b47d303
ED
5537
5538 return HRTIMER_NORESTART;
5539}
5540
d565b0a1
HX
5541void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5542 int (*poll)(struct napi_struct *, int), int weight)
5543{
5544 INIT_LIST_HEAD(&napi->poll_list);
3b47d303
ED
5545 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5546 napi->timer.function = napi_watchdog;
4ae5544f 5547 napi->gro_count = 0;
d565b0a1 5548 napi->gro_list = NULL;
5d38a079 5549 napi->skb = NULL;
d565b0a1 5550 napi->poll = poll;
82dc3c63
ED
5551 if (weight > NAPI_POLL_WEIGHT)
5552 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5553 weight, dev->name);
d565b0a1
HX
5554 napi->weight = weight;
5555 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 5556 napi->dev = dev;
5d38a079 5557#ifdef CONFIG_NETPOLL
d565b0a1
HX
5558 napi->poll_owner = -1;
5559#endif
5560 set_bit(NAPI_STATE_SCHED, &napi->state);
93d05d4a 5561 napi_hash_add(napi);
d565b0a1
HX
5562}
5563EXPORT_SYMBOL(netif_napi_add);
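
/*
 * Illustrative sketch, not part of this file: typical probe-time
 * registration tying a NAPI context to its device with the default
 * weight. my_nic_priv and my_nic_poll are hypothetical.
 */
static void my_nic_register_napi(struct net_device *dev,
				 struct my_nic_priv *priv)
{
	netif_napi_add(dev, &priv->napi, my_nic_poll, NAPI_POLL_WEIGHT);
}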
5564
3b47d303
ED
5565void napi_disable(struct napi_struct *n)
5566{
5567 might_sleep();
5568 set_bit(NAPI_STATE_DISABLE, &n->state);
5569
5570 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5571 msleep(1);
2d8bff12
NH
5572 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5573 msleep(1);
3b47d303
ED
5574
5575 hrtimer_cancel(&n->timer);
5576
5577 clear_bit(NAPI_STATE_DISABLE, &n->state);
5578}
5579EXPORT_SYMBOL(napi_disable);
5580
93d05d4a 5581/* Must be called in process context */
d565b0a1
HX
5582void netif_napi_del(struct napi_struct *napi)
5583{
93d05d4a
ED
5584 might_sleep();
5585 if (napi_hash_del(napi))
5586 synchronize_net();
d7b06636 5587 list_del_init(&napi->dev_list);
76620aaf 5588 napi_free_frags(napi);
d565b0a1 5589
289dccbe 5590 kfree_skb_list(napi->gro_list);
d565b0a1 5591 napi->gro_list = NULL;
4ae5544f 5592 napi->gro_count = 0;
d565b0a1
HX
5593}
5594EXPORT_SYMBOL(netif_napi_del);
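
/*
 * Illustrative sketch, not part of this file: teardown ordering for the
 * two helpers above. Both must run in process context; napi_disable()
 * waits for any in-flight poll, and netif_napi_del() may sleep in
 * synchronize_net(). my_nic_priv is hypothetical.
 */
static void my_nic_teardown_napi(struct my_nic_priv *priv)
{
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
}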
5595
726ce70e
HX
5596static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5597{
5598 void *have;
5599 int work, weight;
5600
5601 list_del_init(&n->poll_list);
5602
5603 have = netpoll_poll_lock(n);
5604
5605 weight = n->weight;
5606
5607 /* This NAPI_STATE_SCHED test is for avoiding a race
5608 * with netpoll's poll_napi(). Only the entity which
5609 * obtains the lock and sees NAPI_STATE_SCHED set will
5610 * actually make the ->poll() call. Therefore we avoid
5611 * accidentally calling ->poll() when NAPI is not scheduled.
5612 */
5613 work = 0;
5614 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5615 work = n->poll(n, weight);
1db19db7 5616 trace_napi_poll(n, work, weight);
726ce70e
HX
5617 }
5618
5619 WARN_ON_ONCE(work > weight);
5620
5621 if (likely(work < weight))
5622 goto out_unlock;
5623
5624 /* Drivers must not modify the NAPI state if they
5625 * consume the entire weight. In such cases this code
5626 * still "owns" the NAPI instance and therefore can
5627 * move the instance around on the list at will.
5628 */
5629 if (unlikely(napi_disable_pending(n))) {
5630 napi_complete(n);
5631 goto out_unlock;
5632 }
5633
5634 if (n->gro_list) {
5635 /* Flush packets that are too old.
5636 * If HZ < 1000, flush all packets.
5637 */
5638 napi_gro_flush(n, HZ >= 1000);
5639 }
5640
001ce546
HX
5641 /* Some drivers may have called napi_schedule
5642 * prior to exhausting their budget.
5643 */
5644 if (unlikely(!list_empty(&n->poll_list))) {
5645 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5646 n->dev ? n->dev->name : "backlog");
5647 goto out_unlock;
5648 }
5649
726ce70e
HX
5650 list_add_tail(&n->poll_list, repoll);
5651
5652out_unlock:
5653 netpoll_poll_unlock(have);
5654
5655 return work;
5656}
5657
0766f788 5658static __latent_entropy void net_rx_action(struct softirq_action *h)
1da177e4 5659{
903ceff7 5660 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
7acf8a1e
MW
5661 unsigned long time_limit = jiffies +
5662 usecs_to_jiffies(netdev_budget_usecs);
51b0bded 5663 int budget = netdev_budget;
d75b1ade
ED
5664 LIST_HEAD(list);
5665 LIST_HEAD(repoll);
53fb95d3 5666
1da177e4 5667 local_irq_disable();
d75b1ade
ED
5668 list_splice_init(&sd->poll_list, &list);
5669 local_irq_enable();
1da177e4 5670
ceb8d5bf 5671 for (;;) {
bea3348e 5672 struct napi_struct *n;
1da177e4 5673
ceb8d5bf
HX
5674 if (list_empty(&list)) {
5675 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
f52dffe0 5676 goto out;
ceb8d5bf
HX
5677 break;
5678 }
5679
6bd373eb
HX
5680 n = list_first_entry(&list, struct napi_struct, poll_list);
5681 budget -= napi_poll(n, &repoll);
5682
d75b1ade 5683 /* If the softirq window is exhausted then punt.
24f8b238
SH
5684 * Allow this to run for 2 jiffies, which allows
5685 * an average latency of 1.5/HZ.
bea3348e 5686 */
ceb8d5bf
HX
5687 if (unlikely(budget <= 0 ||
5688 time_after_eq(jiffies, time_limit))) {
5689 sd->time_squeeze++;
5690 break;
5691 }
1da177e4 5692 }
d75b1ade 5693
d75b1ade
ED
5694 local_irq_disable();
5695
5696 list_splice_tail_init(&sd->poll_list, &list);
5697 list_splice_tail(&repoll, &list);
5698 list_splice(&list, &sd->poll_list);
5699 if (!list_empty(&sd->poll_list))
5700 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
5701
e326bed2 5702 net_rps_action_and_irq_enable(sd);
f52dffe0
ED
5703out:
5704 __kfree_skb_flush();
1da177e4
LT
5705}
5706
aa9d8560 5707struct netdev_adjacent {
9ff162a8 5708 struct net_device *dev;
5d261913
VF
5709
5710 /* upper master flag; there can only be one master device per list */
9ff162a8 5711 bool master;
5d261913 5712
5d261913
VF
5713 /* counter for the number of times this device was added to us */
5714 u16 ref_nr;
5715
402dae96
VF
5716 /* private field for the users */
5717 void *private;
5718
9ff162a8
JP
5719 struct list_head list;
5720 struct rcu_head rcu;
9ff162a8
JP
5721};
5722
6ea29da1 5723static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
2f268f12 5724 struct list_head *adj_list)
9ff162a8 5725{
5d261913 5726 struct netdev_adjacent *adj;
5d261913 5727
2f268f12 5728 list_for_each_entry(adj, adj_list, list) {
5d261913
VF
5729 if (adj->dev == adj_dev)
5730 return adj;
9ff162a8
JP
5731 }
5732 return NULL;
5733}
5734
f1170fd4
DA
5735static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
5736{
5737 struct net_device *dev = data;
5738
5739 return upper_dev == dev;
5740}
5741
9ff162a8
JP
5742/**
5743 * netdev_has_upper_dev - Check if device is linked to an upper device
5744 * @dev: device
5745 * @upper_dev: upper device to check
5746 *
5747 * Find out if a device is linked to the specified upper device and return true
5748 * in case it is. Note that this checks only the immediate upper device,
5749 * not through a complete stack of devices. The caller must hold the RTNL lock.
5750 */
5751bool netdev_has_upper_dev(struct net_device *dev,
5752 struct net_device *upper_dev)
5753{
5754 ASSERT_RTNL();
5755
f1170fd4
DA
5756 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5757 upper_dev);
9ff162a8
JP
5758}
5759EXPORT_SYMBOL(netdev_has_upper_dev);
5760
1a3f060c
DA
5761/**
5762 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
5763 * @dev: device
5764 * @upper_dev: upper device to check
5765 *
5766 * Find out if a device is linked to the specified upper device and return true
5767 * in case it is. Note that this checks the entire upper device chain.
5768 * The caller must hold the RCU read lock.
5769 */
5770
1a3f060c
DA
5771bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
5772 struct net_device *upper_dev)
5773{
5774 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5775 upper_dev);
5776}
5777EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
5778
9ff162a8
JP
5779/**
5780 * netdev_has_any_upper_dev - Check if device is linked to some device
5781 * @dev: device
5782 *
5783 * Find out if a device is linked to an upper device and return true in case
5784 * it is. The caller must hold the RTNL lock.
5785 */
25cc72a3 5786bool netdev_has_any_upper_dev(struct net_device *dev)
9ff162a8
JP
5787{
5788 ASSERT_RTNL();
5789
f1170fd4 5790 return !list_empty(&dev->adj_list.upper);
9ff162a8 5791}
25cc72a3 5792EXPORT_SYMBOL(netdev_has_any_upper_dev);
9ff162a8
JP
5793
5794/**
5795 * netdev_master_upper_dev_get - Get master upper device
5796 * @dev: device
5797 *
5798 * Find a master upper device and return pointer to it or NULL in case
5799 * it's not there. The caller must hold the RTNL lock.
5800 */
5801struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5802{
aa9d8560 5803 struct netdev_adjacent *upper;
9ff162a8
JP
5804
5805 ASSERT_RTNL();
5806
2f268f12 5807 if (list_empty(&dev->adj_list.upper))
9ff162a8
JP
5808 return NULL;
5809
2f268f12 5810 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 5811 struct netdev_adjacent, list);
9ff162a8
JP
5812 if (likely(upper->master))
5813 return upper->dev;
5814 return NULL;
5815}
5816EXPORT_SYMBOL(netdev_master_upper_dev_get);
5817
0f524a80
DA
5818/**
5819 * netdev_has_any_lower_dev - Check if device is linked to some device
5820 * @dev: device
5821 *
5822 * Find out if a device is linked to a lower device and return true in case
5823 * it is. The caller must hold the RTNL lock.
5824 */
5825static bool netdev_has_any_lower_dev(struct net_device *dev)
5826{
5827 ASSERT_RTNL();
5828
5829 return !list_empty(&dev->adj_list.lower);
5830}
5831
b6ccba4c
VF
5832void *netdev_adjacent_get_private(struct list_head *adj_list)
5833{
5834 struct netdev_adjacent *adj;
5835
5836 adj = list_entry(adj_list, struct netdev_adjacent, list);
5837
5838 return adj->private;
5839}
5840EXPORT_SYMBOL(netdev_adjacent_get_private);
5841
44a40855
VY
5842/**
5843 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5844 * @dev: device
5845 * @iter: list_head ** of the current position
5846 *
5847 * Gets the next device from the dev's upper list, starting from iter
5848 * position. The caller must hold RCU read lock.
5849 */
5850struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5851 struct list_head **iter)
5852{
5853 struct netdev_adjacent *upper;
5854
5855 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5856
5857 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5858
5859 if (&upper->list == &dev->adj_list.upper)
5860 return NULL;
5861
5862 *iter = &upper->list;
5863
5864 return upper->dev;
5865}
5866EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
5867
1a3f060c
DA
5868static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
5869 struct list_head **iter)
5870{
5871 struct netdev_adjacent *upper;
5872
5873 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5874
5875 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5876
5877 if (&upper->list == &dev->adj_list.upper)
5878 return NULL;
5879
5880 *iter = &upper->list;
5881
5882 return upper->dev;
5883}
5884
5885int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
5886 int (*fn)(struct net_device *dev,
5887 void *data),
5888 void *data)
5889{
5890 struct net_device *udev;
5891 struct list_head *iter;
5892 int ret;
5893
5894 for (iter = &dev->adj_list.upper,
5895 udev = netdev_next_upper_dev_rcu(dev, &iter);
5896 udev;
5897 udev = netdev_next_upper_dev_rcu(dev, &iter)) {
5898 /* first is the upper device itself */
5899 ret = fn(udev, data);
5900 if (ret)
5901 return ret;
5902
5903 /* then look at all of its upper devices */
5904 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
5905 if (ret)
5906 return ret;
5907 }
5908
5909 return 0;
5910}
5911EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
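
/*
 * Illustrative sketch, not part of this file: a walk callback that counts
 * every device stacked above @dev. Returning 0 keeps the walk going; a
 * non-zero return would stop it and be propagated to the caller. The
 * helpers are hypothetical; the walk requires the RCU read lock.
 */
static int count_upper(struct net_device *upper, void *data)
{
	int *count = data;

	(*count)++;
	return 0;
}

static int my_count_all_uppers(struct net_device *dev)
{
	int count = 0;

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, count_upper, &count);
	rcu_read_unlock();
	return count;
}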
5912
31088a11
VF
5913/**
5914 * netdev_lower_get_next_private - Get the next ->private from the
5915 * lower neighbour list
5916 * @dev: device
5917 * @iter: list_head ** of the current position
5918 *
5919 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5920 * list, starting from iter position. The caller must hold either the
5921 * RTNL lock or its own locking that guarantees that the neighbour lower
b469139e 5922 * list will remain unchanged.
31088a11
VF
5923 */
5924void *netdev_lower_get_next_private(struct net_device *dev,
5925 struct list_head **iter)
5926{
5927 struct netdev_adjacent *lower;
5928
5929 lower = list_entry(*iter, struct netdev_adjacent, list);
5930
5931 if (&lower->list == &dev->adj_list.lower)
5932 return NULL;
5933
6859e7df 5934 *iter = lower->list.next;
31088a11
VF
5935
5936 return lower->private;
5937}
5938EXPORT_SYMBOL(netdev_lower_get_next_private);
5939
5940/**
5941 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5942 * lower neighbour list, RCU
5943 * variant
5944 * @dev: device
5945 * @iter: list_head ** of the current position
5946 *
5947 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5948 * list, starting from iter position. The caller must hold RCU read lock.
5949 */
5950void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5951 struct list_head **iter)
5952{
5953 struct netdev_adjacent *lower;
5954
5955 WARN_ON_ONCE(!rcu_read_lock_held());
5956
5957 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5958
5959 if (&lower->list == &dev->adj_list.lower)
5960 return NULL;
5961
6859e7df 5962 *iter = &lower->list;
31088a11
VF
5963
5964 return lower->private;
5965}
5966EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5967
4085ebe8
VY
5968/**
5969 * netdev_lower_get_next - Get the next device from the lower neighbour
5970 * list
5971 * @dev: device
5972 * @iter: list_head ** of the current position
5973 *
5974 * Gets the next netdev_adjacent from the dev's lower neighbour
5975 * list, starting from iter position. The caller must hold the RTNL lock or
5976 * its own locking that guarantees that the neighbour lower
b469139e 5977 * list will remain unchanged.
4085ebe8
VY
5978 */
5979void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5980{
5981 struct netdev_adjacent *lower;
5982
cfdd28be 5983 lower = list_entry(*iter, struct netdev_adjacent, list);
4085ebe8
VY
5984
5985 if (&lower->list == &dev->adj_list.lower)
5986 return NULL;
5987
cfdd28be 5988 *iter = lower->list.next;
4085ebe8
VY
5989
5990 return lower->dev;
5991}
5992EXPORT_SYMBOL(netdev_lower_get_next);
5993
1a3f060c
DA
5994static struct net_device *netdev_next_lower_dev(struct net_device *dev,
5995 struct list_head **iter)
5996{
5997 struct netdev_adjacent *lower;
5998
46b5ab1a 5999 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
1a3f060c
DA
6000
6001 if (&lower->list == &dev->adj_list.lower)
6002 return NULL;
6003
46b5ab1a 6004 *iter = &lower->list;
1a3f060c
DA
6005
6006 return lower->dev;
6007}
6008
6009int netdev_walk_all_lower_dev(struct net_device *dev,
6010 int (*fn)(struct net_device *dev,
6011 void *data),
6012 void *data)
6013{
6014 struct net_device *ldev;
6015 struct list_head *iter;
6016 int ret;
6017
6018 for (iter = &dev->adj_list.lower,
6019 ldev = netdev_next_lower_dev(dev, &iter);
6020 ldev;
6021 ldev = netdev_next_lower_dev(dev, &iter)) {
6022 /* first is the lower device itself */
6023 ret = fn(ldev, data);
6024 if (ret)
6025 return ret;
6026
6027 /* then look at all of its lower devices */
6028 ret = netdev_walk_all_lower_dev(ldev, fn, data);
6029 if (ret)
6030 return ret;
6031 }
6032
6033 return 0;
6034}
6035EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
6036
1a3f060c
DA
6037static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6038 struct list_head **iter)
6039{
6040 struct netdev_adjacent *lower;
6041
6042 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6043 if (&lower->list == &dev->adj_list.lower)
6044 return NULL;
6045
6046 *iter = &lower->list;
6047
6048 return lower->dev;
6049}
6050
6051int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
6052 int (*fn)(struct net_device *dev,
6053 void *data),
6054 void *data)
6055{
6056 struct net_device *ldev;
6057 struct list_head *iter;
6058 int ret;
6059
6060 for (iter = &dev->adj_list.lower,
6061 ldev = netdev_next_lower_dev_rcu(dev, &iter);
6062 ldev;
6063 ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
6064 /* first is the lower device itself */
6065 ret = fn(ldev, data);
6066 if (ret)
6067 return ret;
6068
6069 /* then look at all of its lower devices */
6070 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
6071 if (ret)
6072 return ret;
6073 }
6074
6075 return 0;
6076}
6077EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6078
e001bfad 6079/**
6080 * netdev_lower_get_first_private_rcu - Get the first ->private from the
6081 * lower neighbour list, RCU
6082 * variant
6083 * @dev: device
6084 *
6085 * Gets the first netdev_adjacent->private from the dev's lower neighbour
6086 * list. The caller must hold RCU read lock.
6087 */
6088void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6089{
6090 struct netdev_adjacent *lower;
6091
6092 lower = list_first_or_null_rcu(&dev->adj_list.lower,
6093 struct netdev_adjacent, list);
6094 if (lower)
6095 return lower->private;
6096 return NULL;
6097}
6098EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6099
9ff162a8
JP
6100/**
6101 * netdev_master_upper_dev_get_rcu - Get master upper device
6102 * @dev: device
6103 *
6104 * Find a master upper device and return pointer to it or NULL in case
6105 * it's not there. The caller must hold the RCU read lock.
6106 */
6107struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6108{
aa9d8560 6109 struct netdev_adjacent *upper;
9ff162a8 6110
2f268f12 6111 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 6112 struct netdev_adjacent, list);
9ff162a8
JP
6113 if (upper && likely(upper->master))
6114 return upper->dev;
6115 return NULL;
6116}
6117EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
6118
0a59f3a9 6119static int netdev_adjacent_sysfs_add(struct net_device *dev,
3ee32707
VF
6120 struct net_device *adj_dev,
6121 struct list_head *dev_list)
6122{
6123 char linkname[IFNAMSIZ+7];
f4563a75 6124
3ee32707
VF
6125 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6126 "upper_%s" : "lower_%s", adj_dev->name);
6127 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6128 linkname);
6129}
0a59f3a9 6130static void netdev_adjacent_sysfs_del(struct net_device *dev,
3ee32707
VF
6131 char *name,
6132 struct list_head *dev_list)
6133{
6134 char linkname[IFNAMSIZ+7];
f4563a75 6135
3ee32707
VF
6136 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6137 "upper_%s" : "lower_%s", name);
6138 sysfs_remove_link(&(dev->dev.kobj), linkname);
6139}
6140
7ce64c79
AF
6141static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6142 struct net_device *adj_dev,
6143 struct list_head *dev_list)
6144{
6145 return (dev_list == &dev->adj_list.upper ||
6146 dev_list == &dev->adj_list.lower) &&
6147 net_eq(dev_net(dev), dev_net(adj_dev));
6148}
3ee32707 6149
5d261913
VF
6150static int __netdev_adjacent_dev_insert(struct net_device *dev,
6151 struct net_device *adj_dev,
7863c054 6152 struct list_head *dev_list,
402dae96 6153 void *private, bool master)
5d261913
VF
6154{
6155 struct netdev_adjacent *adj;
842d67a7 6156 int ret;
5d261913 6157
6ea29da1 6158 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913
VF
6159
6160 if (adj) {
790510d9 6161 adj->ref_nr += 1;
67b62f98
DA
6162 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6163 dev->name, adj_dev->name, adj->ref_nr);
6164
5d261913
VF
6165 return 0;
6166 }
6167
6168 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6169 if (!adj)
6170 return -ENOMEM;
6171
6172 adj->dev = adj_dev;
6173 adj->master = master;
790510d9 6174 adj->ref_nr = 1;
402dae96 6175 adj->private = private;
5d261913 6176 dev_hold(adj_dev);
2f268f12 6177
67b62f98
DA
6178 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6179 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
5d261913 6180
7ce64c79 6181 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
3ee32707 6182 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5831d66e
VF
6183 if (ret)
6184 goto free_adj;
6185 }
6186
7863c054 6187 /* Ensure that master link is always the first item in list. */
842d67a7
VF
6188 if (master) {
6189 ret = sysfs_create_link(&(dev->dev.kobj),
6190 &(adj_dev->dev.kobj), "master");
6191 if (ret)
5831d66e 6192 goto remove_symlinks;
842d67a7 6193
7863c054 6194 list_add_rcu(&adj->list, dev_list);
842d67a7 6195 } else {
7863c054 6196 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 6197 }
5d261913
VF
6198
6199 return 0;
842d67a7 6200
5831d66e 6201remove_symlinks:
7ce64c79 6202 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 6203 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
842d67a7
VF
6204free_adj:
6205 kfree(adj);
974daef7 6206 dev_put(adj_dev);
842d67a7
VF
6207
6208 return ret;
5d261913
VF
6209}
6210
1d143d9f 6211static void __netdev_adjacent_dev_remove(struct net_device *dev,
6212 struct net_device *adj_dev,
93409033 6213 u16 ref_nr,
1d143d9f 6214 struct list_head *dev_list)
5d261913
VF
6215{
6216 struct netdev_adjacent *adj;
6217
67b62f98
DA
6218 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6219 dev->name, adj_dev->name, ref_nr);
6220
6ea29da1 6221 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913 6222
2f268f12 6223 if (!adj) {
67b62f98 6224 pr_err("Adjacency does not exist for device %s from %s\n",
2f268f12 6225 dev->name, adj_dev->name);
67b62f98
DA
6226 WARN_ON(1);
6227 return;
2f268f12 6228 }
5d261913 6229
93409033 6230 if (adj->ref_nr > ref_nr) {
67b62f98
DA
6231 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6232 dev->name, adj_dev->name, ref_nr,
6233 adj->ref_nr - ref_nr);
93409033 6234 adj->ref_nr -= ref_nr;
5d261913
VF
6235 return;
6236 }
6237
842d67a7
VF
6238 if (adj->master)
6239 sysfs_remove_link(&(dev->dev.kobj), "master");
6240
7ce64c79 6241 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 6242 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5831d66e 6243
5d261913 6244 list_del_rcu(&adj->list);
67b62f98 6245 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
2f268f12 6246 adj_dev->name, dev->name, adj_dev->name);
5d261913
VF
6247 dev_put(adj_dev);
6248 kfree_rcu(adj, rcu);
6249}
6250
1d143d9f 6251static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6252 struct net_device *upper_dev,
6253 struct list_head *up_list,
6254 struct list_head *down_list,
6255 void *private, bool master)
5d261913
VF
6256{
6257 int ret;
6258
790510d9 6259 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
93409033 6260 private, master);
5d261913
VF
6261 if (ret)
6262 return ret;
6263
790510d9 6264 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
93409033 6265 private, false);
5d261913 6266 if (ret) {
790510d9 6267 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
5d261913
VF
6268 return ret;
6269 }
6270
6271 return 0;
6272}
6273
1d143d9f 6274static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
6275 struct net_device *upper_dev,
93409033 6276 u16 ref_nr,
1d143d9f 6277 struct list_head *up_list,
6278 struct list_head *down_list)
5d261913 6279{
93409033
AC
6280 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
6281 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
5d261913
VF
6282}
6283
1d143d9f 6284static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
6285 struct net_device *upper_dev,
6286 void *private, bool master)
2f268f12 6287{
f1170fd4
DA
6288 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
6289 &dev->adj_list.upper,
6290 &upper_dev->adj_list.lower,
6291 private, master);
5d261913
VF
6292}
6293
1d143d9f 6294static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
6295 struct net_device *upper_dev)
2f268f12 6296{
93409033 6297 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
2f268f12
VF
6298 &dev->adj_list.upper,
6299 &upper_dev->adj_list.lower);
6300}
5d261913 6301
9ff162a8 6302static int __netdev_upper_dev_link(struct net_device *dev,
402dae96 6303 struct net_device *upper_dev, bool master,
42ab19ee
DA
6304 void *upper_priv, void *upper_info,
6305 struct netlink_ext_ack *extack)
9ff162a8 6306{
51d0c047
DA
6307 struct netdev_notifier_changeupper_info changeupper_info = {
6308 .info = {
6309 .dev = dev,
42ab19ee 6310 .extack = extack,
51d0c047
DA
6311 },
6312 .upper_dev = upper_dev,
6313 .master = master,
6314 .linking = true,
6315 .upper_info = upper_info,
6316 };
5d261913 6317 int ret = 0;
9ff162a8
JP
6318
6319 ASSERT_RTNL();
6320
6321 if (dev == upper_dev)
6322 return -EBUSY;
6323
6324 /* To prevent loops, check if dev is not upper device to upper_dev. */
f1170fd4 6325 if (netdev_has_upper_dev(upper_dev, dev))
9ff162a8
JP
6326 return -EBUSY;
6327
f1170fd4 6328 if (netdev_has_upper_dev(dev, upper_dev))
9ff162a8
JP
6329 return -EEXIST;
6330
6331 if (master && netdev_master_upper_dev_get(dev))
6332 return -EBUSY;
6333
51d0c047 6334 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
573c7ba0
JP
6335 &changeupper_info.info);
6336 ret = notifier_to_errno(ret);
6337 if (ret)
6338 return ret;
6339
6dffb044 6340 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
402dae96 6341 master);
5d261913
VF
6342 if (ret)
6343 return ret;
9ff162a8 6344
51d0c047 6345 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
b03804e7
IS
6346 &changeupper_info.info);
6347 ret = notifier_to_errno(ret);
6348 if (ret)
f1170fd4 6349 goto rollback;
b03804e7 6350
9ff162a8 6351 return 0;
5d261913 6352
f1170fd4 6353rollback:
2f268f12 6354 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
6355
6356 return ret;
9ff162a8
JP
6357}
6358
6359/**
6360 * netdev_upper_dev_link - Add a link to the upper device
6361 * @dev: device
6362 * @upper_dev: new upper device
6363 *
6364 * Adds a link to a device which is upper to this one. The caller must hold
6365 * the RTNL lock. On a failure a negative errno code is returned.
6366 * On success the reference counts are adjusted and the function
6367 * returns zero.
6368 */
6369int netdev_upper_dev_link(struct net_device *dev,
42ab19ee
DA
6370 struct net_device *upper_dev,
6371 struct netlink_ext_ack *extack)
9ff162a8 6372{
42ab19ee
DA
6373 return __netdev_upper_dev_link(dev, upper_dev, false,
6374 NULL, NULL, extack);
9ff162a8
JP
6375}
6376EXPORT_SYMBOL(netdev_upper_dev_link);
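
/*
 * Illustrative sketch, not part of this file: how a stacking driver
 * (e.g. a VLAN-like virtual device) would record its relationship to the
 * real lower device. Here dev is the lower device and vdev sits on top;
 * my_stack_link() is hypothetical.
 */
static int my_stack_link(struct net_device *dev, struct net_device *vdev,
			 struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();	/* the link helpers require the RTNL lock */
	return netdev_upper_dev_link(dev, vdev, extack);
}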
6377
6378/**
6379 * netdev_master_upper_dev_link - Add a master link to the upper device
6380 * @dev: device
6381 * @upper_dev: new upper device
6dffb044 6382 * @upper_priv: upper device private
29bf24af 6383 * @upper_info: upper info to be passed down via notifier
9ff162a8
JP
6384 *
6385 * Adds a link to a device which is upper to this one. In this case, only
6386 * one master upper device can be linked, although other non-master devices
6387 * might be linked as well. The caller must hold the RTNL lock.
6388 * On a failure a negative errno code is returned. On success the reference
6389 * counts are adjusted and the function returns zero.
6390 */
6391int netdev_master_upper_dev_link(struct net_device *dev,
6dffb044 6392 struct net_device *upper_dev,
42ab19ee
DA
6393 void *upper_priv, void *upper_info,
6394 struct netlink_ext_ack *extack)
9ff162a8 6395{
29bf24af 6396 return __netdev_upper_dev_link(dev, upper_dev, true,
42ab19ee 6397 upper_priv, upper_info, extack);
9ff162a8
JP
6398}
6399EXPORT_SYMBOL(netdev_master_upper_dev_link);
6400
6401/**
6402 * netdev_upper_dev_unlink - Removes a link to upper device
6403 * @dev: device
6404 * @upper_dev: upper device to unlink
6405 *
6406 * Removes a link to a device which is upper to this one. The caller must hold
6407 * the RTNL lock.
6408 */
6409void netdev_upper_dev_unlink(struct net_device *dev,
6410 struct net_device *upper_dev)
6411{
51d0c047
DA
6412 struct netdev_notifier_changeupper_info changeupper_info = {
6413 .info = {
6414 .dev = dev,
6415 },
6416 .upper_dev = upper_dev,
6417 .linking = false,
6418 };
f4563a75 6419
9ff162a8
JP
6420 ASSERT_RTNL();
6421
0e4ead9d 6422 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
0e4ead9d 6423
51d0c047 6424 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
573c7ba0
JP
6425 &changeupper_info.info);
6426
2f268f12 6427 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913 6428
51d0c047 6429 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
0e4ead9d 6430 &changeupper_info.info);
9ff162a8
JP
6431}
6432EXPORT_SYMBOL(netdev_upper_dev_unlink);
6433
61bd3857
MS
6434/**
6435 * netdev_bonding_info_change - Dispatch event about slave change
6436 * @dev: device
4a26e453 6437 * @bonding_info: info to dispatch
61bd3857
MS
6438 *
6439 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
6440 * The caller must hold the RTNL lock.
6441 */
6442void netdev_bonding_info_change(struct net_device *dev,
6443 struct netdev_bonding_info *bonding_info)
6444{
51d0c047
DA
6445 struct netdev_notifier_bonding_info info = {
6446 .info.dev = dev,
6447 };
61bd3857
MS
6448
6449 memcpy(&info.bonding_info, bonding_info,
6450 sizeof(struct netdev_bonding_info));
51d0c047 6451 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
61bd3857
MS
6452 &info.info);
6453}
6454EXPORT_SYMBOL(netdev_bonding_info_change);
6455
2ce1ee17 6456static void netdev_adjacent_add_links(struct net_device *dev)
4c75431a
AF
6457{
6458 struct netdev_adjacent *iter;
6459
6460 struct net *net = dev_net(dev);
6461
6462 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6463 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6464 continue;
6465 netdev_adjacent_sysfs_add(iter->dev, dev,
6466 &iter->dev->adj_list.lower);
6467 netdev_adjacent_sysfs_add(dev, iter->dev,
6468 &dev->adj_list.upper);
6469 }
6470
6471 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6472 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6473 continue;
6474 netdev_adjacent_sysfs_add(iter->dev, dev,
6475 &iter->dev->adj_list.upper);
6476 netdev_adjacent_sysfs_add(dev, iter->dev,
6477 &dev->adj_list.lower);
6478 }
6479}
6480
2ce1ee17 6481static void netdev_adjacent_del_links(struct net_device *dev)
4c75431a
AF
6482{
6483 struct netdev_adjacent *iter;
6484
6485 struct net *net = dev_net(dev);
6486
6487 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6488 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6489 continue;
6490 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6491 &iter->dev->adj_list.lower);
6492 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6493 &dev->adj_list.upper);
6494 }
6495
6496 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6497 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6498 continue;
6499 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6500 &iter->dev->adj_list.upper);
6501 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6502 &dev->adj_list.lower);
6503 }
6504}
6505
5bb025fa 6506void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
402dae96 6507{
5bb025fa 6508 struct netdev_adjacent *iter;
402dae96 6509
4c75431a
AF
6510 struct net *net = dev_net(dev);
6511
5bb025fa 6512 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6513 if (!net_eq(net, dev_net(iter->dev)))
4c75431a 6514 continue;
5bb025fa
VF
6515 netdev_adjacent_sysfs_del(iter->dev, oldname,
6516 &iter->dev->adj_list.lower);
6517 netdev_adjacent_sysfs_add(iter->dev, dev,
6518 &iter->dev->adj_list.lower);
6519 }
402dae96 6520
5bb025fa 6521 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6522 if (!net_eq(net, dev_net(iter->dev)))
4c75431a 6523 continue;
5bb025fa
VF
6524 netdev_adjacent_sysfs_del(iter->dev, oldname,
6525 &iter->dev->adj_list.upper);
6526 netdev_adjacent_sysfs_add(iter->dev, dev,
6527 &iter->dev->adj_list.upper);
6528 }
402dae96 6529}
402dae96
VF
6530
6531void *netdev_lower_dev_get_private(struct net_device *dev,
6532 struct net_device *lower_dev)
6533{
6534 struct netdev_adjacent *lower;
6535
6536 if (!lower_dev)
6537 return NULL;
6ea29da1 6538 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
402dae96
VF
6539 if (!lower)
6540 return NULL;
6541
6542 return lower->private;
6543}
6544EXPORT_SYMBOL(netdev_lower_dev_get_private);
6545
4085ebe8 6546
952fcfd0 6547int dev_get_nest_level(struct net_device *dev)
4085ebe8
VY
6548{
6549 struct net_device *lower = NULL;
6550 struct list_head *iter;
6551 int max_nest = -1;
6552 int nest;
6553
6554 ASSERT_RTNL();
6555
6556 netdev_for_each_lower_dev(dev, lower, iter) {
952fcfd0 6557 nest = dev_get_nest_level(lower);
4085ebe8
VY
6558 if (max_nest < nest)
6559 max_nest = nest;
6560 }
6561
952fcfd0 6562 return max_nest + 1;
4085ebe8
VY
6563}
6564EXPORT_SYMBOL(dev_get_nest_level);
6565
04d48266
JP
6566/**
6567 * netdev_lower_state_changed - Dispatch event about lower device state change
6568 * @lower_dev: device
6569 * @lower_state_info: state to dispatch
6570 *
6571 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
6572 * The caller must hold the RTNL lock.
6573 */
6574void netdev_lower_state_changed(struct net_device *lower_dev,
6575 void *lower_state_info)
6576{
51d0c047
DA
6577 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
6578 .info.dev = lower_dev,
6579 };
04d48266
JP
6580
6581 ASSERT_RTNL();
6582 changelowerstate_info.lower_state_info = lower_state_info;
51d0c047 6583 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
04d48266
JP
6584 &changelowerstate_info.info);
6585}
6586EXPORT_SYMBOL(netdev_lower_state_changed);
6587
b6c40d68
PM
6588static void dev_change_rx_flags(struct net_device *dev, int flags)
6589{
d314774c
SH
6590 const struct net_device_ops *ops = dev->netdev_ops;
6591
d2615bf4 6592 if (ops->ndo_change_rx_flags)
d314774c 6593 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
6594}
6595
991fb3f7 6596static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
1da177e4 6597{
b536db93 6598 unsigned int old_flags = dev->flags;
d04a48b0
EB
6599 kuid_t uid;
6600 kgid_t gid;
1da177e4 6601
24023451
PM
6602 ASSERT_RTNL();
6603
dad9b335
WC
6604 dev->flags |= IFF_PROMISC;
6605 dev->promiscuity += inc;
6606 if (dev->promiscuity == 0) {
6607 /*
6608 * Avoid overflow.
6609 * If inc causes overflow, leave promiscuity untouched and return an error.
6610 */
6611 if (inc < 0)
6612 dev->flags &= ~IFF_PROMISC;
6613 else {
6614 dev->promiscuity -= inc;
7b6cd1ce
JP
6615 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6616 dev->name);
dad9b335
WC
6617 return -EOVERFLOW;
6618 }
6619 }
52609c0b 6620 if (dev->flags != old_flags) {
7b6cd1ce
JP
6621 pr_info("device %s %s promiscuous mode\n",
6622 dev->name,
6623 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
6624 if (audit_enabled) {
6625 current_uid_gid(&uid, &gid);
7759db82
KHK
6626 audit_log(current->audit_context, GFP_ATOMIC,
6627 AUDIT_ANOM_PROMISCUOUS,
6628 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6629 dev->name, (dev->flags & IFF_PROMISC),
6630 (old_flags & IFF_PROMISC),
e1760bd5 6631 from_kuid(&init_user_ns, audit_get_loginuid(current)),
d04a48b0
EB
6632 from_kuid(&init_user_ns, uid),
6633 from_kgid(&init_user_ns, gid),
7759db82 6634 audit_get_sessionid(current));
8192b0c4 6635 }
24023451 6636
b6c40d68 6637 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 6638 }
991fb3f7
ND
6639 if (notify)
6640 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
dad9b335 6641 return 0;
1da177e4
LT
6642}
6643
4417da66
PM
6644/**
6645 * dev_set_promiscuity - update promiscuity count on a device
6646 * @dev: device
6647 * @inc: modifier
6648 *
6649 * Add or remove promiscuity from a device. While the count in the device
6650 * remains above zero the interface remains promiscuous. Once it hits zero
6651 * the device reverts to normal filtering operation. A negative inc
6652 * value is used to drop promiscuity on the device.
dad9b335 6653 * Return 0 if successful or a negative errno code on error.
4417da66 6654 */
dad9b335 6655int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 6656{
b536db93 6657 unsigned int old_flags = dev->flags;
dad9b335 6658 int err;
4417da66 6659
991fb3f7 6660 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 6661 if (err < 0)
dad9b335 6662 return err;
4417da66
PM
6663 if (dev->flags != old_flags)
6664 dev_set_rx_mode(dev);
dad9b335 6665 return err;
4417da66 6666}
d1b19dff 6667EXPORT_SYMBOL(dev_set_promiscuity);
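
/*
 * Illustrative sketch, not part of this file: a packet-capture style user
 * taking and dropping one promiscuity reference. The device stays
 * promiscuous while any reference is held; both calls require the RTNL
 * lock. The wrappers are hypothetical.
 */
static int my_capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);
}

static void my_capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);
}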
4417da66 6668
991fb3f7 6669static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 6670{
991fb3f7 6671 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 6672
24023451
PM
6673 ASSERT_RTNL();
6674
1da177e4 6675 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
6676 dev->allmulti += inc;
6677 if (dev->allmulti == 0) {
6678 /*
6679 * Avoid overflow.
6680 * If inc causes overflow, leave allmulti untouched and return an error.
6681 */
6682 if (inc < 0)
6683 dev->flags &= ~IFF_ALLMULTI;
6684 else {
6685 dev->allmulti -= inc;
7b6cd1ce
JP
6686 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6687 dev->name);
dad9b335
WC
6688 return -EOVERFLOW;
6689 }
6690 }
24023451 6691 if (dev->flags ^ old_flags) {
b6c40d68 6692 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 6693 dev_set_rx_mode(dev);
991fb3f7
ND
6694 if (notify)
6695 __dev_notify_flags(dev, old_flags,
6696 dev->gflags ^ old_gflags);
24023451 6697 }
dad9b335 6698 return 0;
4417da66 6699}
991fb3f7
ND
6700
6701/**
6702 * dev_set_allmulti - update allmulti count on a device
6703 * @dev: device
6704 * @inc: modifier
6705 *
6706 * Add or remove reception of all multicast frames to a device. While the
6707 * count in the device remains above zero the interface keeps listening
6708 * to all multicast frames. Once it hits zero the device reverts to normal
6709 * filtering operation. A negative @inc value is used to drop the counter
6710 * when releasing a resource needing all multicasts.
6711 * Return 0 if successful or a negative errno code on error.
6712 */
6713
6714int dev_set_allmulti(struct net_device *dev, int inc)
6715{
6716 return __dev_set_allmulti(dev, inc, true);
6717}
d1b19dff 6718EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
6719
6720/*
6721 * Upload unicast and multicast address lists to device and
6722 * configure RX filtering. When the device doesn't support unicast
53ccaae1 6723 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
6724 * are present.
6725 */
6726void __dev_set_rx_mode(struct net_device *dev)
6727{
d314774c
SH
6728 const struct net_device_ops *ops = dev->netdev_ops;
6729
4417da66
PM
6730 /* dev_open will call this function so the list will stay sane. */
6731 if (!(dev->flags&IFF_UP))
6732 return;
6733
6734 if (!netif_device_present(dev))
40b77c94 6735 return;
4417da66 6736
01789349 6737 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
6738 /* Unicast address changes may only happen under the rtnl,
6739 * therefore calling __dev_set_promiscuity here is safe.
6740 */
32e7bfc4 6741 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 6742 __dev_set_promiscuity(dev, 1, false);
2d348d1f 6743 dev->uc_promisc = true;
32e7bfc4 6744 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 6745 __dev_set_promiscuity(dev, -1, false);
2d348d1f 6746 dev->uc_promisc = false;
4417da66 6747 }
4417da66 6748 }
01789349
JP
6749
6750 if (ops->ndo_set_rx_mode)
6751 ops->ndo_set_rx_mode(dev);
4417da66
PM
6752}
6753
6754void dev_set_rx_mode(struct net_device *dev)
6755{
b9e40857 6756 netif_addr_lock_bh(dev);
4417da66 6757 __dev_set_rx_mode(dev);
b9e40857 6758 netif_addr_unlock_bh(dev);
1da177e4
LT
6759}
6760
f0db275a
SH
6761/**
6762 * dev_get_flags - get flags reported to userspace
6763 * @dev: device
6764 *
6765 * Get the combination of flag bits exported through APIs to userspace.
6766 */
95c96174 6767unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 6768{
95c96174 6769 unsigned int flags;
1da177e4
LT
6770
6771 flags = (dev->flags & ~(IFF_PROMISC |
6772 IFF_ALLMULTI |
b00055aa
SR
6773 IFF_RUNNING |
6774 IFF_LOWER_UP |
6775 IFF_DORMANT)) |
1da177e4
LT
6776 (dev->gflags & (IFF_PROMISC |
6777 IFF_ALLMULTI));
6778
b00055aa
SR
6779 if (netif_running(dev)) {
6780 if (netif_oper_up(dev))
6781 flags |= IFF_RUNNING;
6782 if (netif_carrier_ok(dev))
6783 flags |= IFF_LOWER_UP;
6784 if (netif_dormant(dev))
6785 flags |= IFF_DORMANT;
6786 }
1da177e4
LT
6787
6788 return flags;
6789}
d1b19dff 6790EXPORT_SYMBOL(dev_get_flags);
1da177e4 6791
bd380811 6792int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 6793{
b536db93 6794 unsigned int old_flags = dev->flags;
bd380811 6795 int ret;
1da177e4 6796
24023451
PM
6797 ASSERT_RTNL();
6798
1da177e4
LT
6799 /*
6800 * Set the flags on our device.
6801 */
6802
6803 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6804 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6805 IFF_AUTOMEDIA)) |
6806 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6807 IFF_ALLMULTI));
6808
6809 /*
6810 * Load in the correct multicast list now the flags have changed.
6811 */
6812
b6c40d68
PM
6813 if ((old_flags ^ flags) & IFF_MULTICAST)
6814 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 6815
4417da66 6816 dev_set_rx_mode(dev);
1da177e4
LT
6817
6818 /*
6819 * Have we downed the interface. We handle IFF_UP ourselves
6820 * according to user attempts to set it, rather than blindly
6821 * setting it.
6822 */
6823
6824 ret = 0;
7051b88a 6825 if ((old_flags ^ flags) & IFF_UP) {
6826 if (old_flags & IFF_UP)
6827 __dev_close(dev);
6828 else
6829 ret = __dev_open(dev);
6830 }
1da177e4 6831
1da177e4 6832 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 6833 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 6834 unsigned int old_flags = dev->flags;
d1b19dff 6835
1da177e4 6836 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
6837
6838 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6839 if (dev->flags != old_flags)
6840 dev_set_rx_mode(dev);
1da177e4
LT
6841 }
6842
6843 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
eb13da1a 6844 * is important. Some (broken) drivers set IFF_PROMISC when
6845 * IFF_ALLMULTI is requested, without asking us and without reporting.
1da177e4
LT
6846 */
6847 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
6848 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6849
1da177e4 6850 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 6851 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
6852 }
6853
bd380811
PM
6854 return ret;
6855}
6856
a528c219
ND
6857void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6858 unsigned int gchanges)
bd380811
PM
6859{
6860 unsigned int changes = dev->flags ^ old_flags;
6861
a528c219 6862 if (gchanges)
7f294054 6863 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 6864
bd380811
PM
6865 if (changes & IFF_UP) {
6866 if (dev->flags & IFF_UP)
6867 call_netdevice_notifiers(NETDEV_UP, dev);
6868 else
6869 call_netdevice_notifiers(NETDEV_DOWN, dev);
6870 }
6871
6872 if (dev->flags & IFF_UP &&
be9efd36 6873 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
51d0c047
DA
6874 struct netdev_notifier_change_info change_info = {
6875 .info = {
6876 .dev = dev,
6877 },
6878 .flags_changed = changes,
6879 };
be9efd36 6880
51d0c047 6881 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
be9efd36 6882 }
bd380811
PM
6883}
6884
6885/**
6886 * dev_change_flags - change device settings
6887 * @dev: device
6888 * @flags: device state flags
6889 *
6890 * Change settings on device based state flags. The flags are
6891 * in the userspace exported format.
6892 */
b536db93 6893int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 6894{
b536db93 6895 int ret;
991fb3f7 6896 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811
PM
6897
6898 ret = __dev_change_flags(dev, flags);
6899 if (ret < 0)
6900 return ret;
6901
991fb3f7 6902 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 6903 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
6904 return ret;
6905}
d1b19dff 6906EXPORT_SYMBOL(dev_change_flags);
1da177e4 6907
f51048c3 6908int __dev_set_mtu(struct net_device *dev, int new_mtu)
2315dc91
VF
6909{
6910 const struct net_device_ops *ops = dev->netdev_ops;
6911
6912 if (ops->ndo_change_mtu)
6913 return ops->ndo_change_mtu(dev, new_mtu);
6914
6915 dev->mtu = new_mtu;
6916 return 0;
6917}
f51048c3 6918EXPORT_SYMBOL(__dev_set_mtu);
2315dc91 6919
f0db275a
SH
6920/**
6921 * dev_set_mtu - Change maximum transfer unit
6922 * @dev: device
6923 * @new_mtu: new transfer unit
6924 *
6925 * Change the maximum transfer size of the network device.
6926 */
1da177e4
LT
6927int dev_set_mtu(struct net_device *dev, int new_mtu)
6928{
2315dc91 6929 int err, orig_mtu;
1da177e4
LT
6930
6931 if (new_mtu == dev->mtu)
6932 return 0;
6933
61e84623
JW
6934 /* MTU must be positive, and in range */
6935 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
6936 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
6937 dev->name, new_mtu, dev->min_mtu);
1da177e4 6938 return -EINVAL;
61e84623
JW
6939 }
6940
6941 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
6942 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
a0e65de7 6943 dev->name, new_mtu, dev->max_mtu);
61e84623
JW
6944 return -EINVAL;
6945 }
1da177e4
LT
6946
6947 if (!netif_device_present(dev))
6948 return -ENODEV;
6949
1d486bfb
VF
6950 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6951 err = notifier_to_errno(err);
6952 if (err)
6953 return err;
d314774c 6954
2315dc91
VF
6955 orig_mtu = dev->mtu;
6956 err = __dev_set_mtu(dev, new_mtu);
d314774c 6957
2315dc91
VF
6958 if (!err) {
6959 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6960 err = notifier_to_errno(err);
6961 if (err) {
6962 /* setting mtu back and notifying everyone again,
6963 * so that they have a chance to revert changes.
6964 */
6965 __dev_set_mtu(dev, orig_mtu);
6966 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6967 }
6968 }
1da177e4
LT
6969 return err;
6970}
d1b19dff 6971EXPORT_SYMBOL(dev_set_mtu);
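
/*
 * Illustrative sketch, not part of this file: clamping a requested MTU to
 * the range the device advertises before calling dev_set_mtu(), so the
 * range checks above cannot fail. my_set_mtu_clamped() is hypothetical
 * and assumes the caller holds the RTNL lock.
 */
static int my_set_mtu_clamped(struct net_device *dev, int mtu)
{
	if (mtu < dev->min_mtu)
		mtu = dev->min_mtu;
	if (dev->max_mtu > 0 && mtu > dev->max_mtu)
		mtu = dev->max_mtu;
	return dev_set_mtu(dev, mtu);
}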
1da177e4 6972
cbda10fa
VD
6973/**
6974 * dev_set_group - Change group this device belongs to
6975 * @dev: device
6976 * @new_group: group this device should belong to
6977 */
6978void dev_set_group(struct net_device *dev, int new_group)
6979{
6980 dev->group = new_group;
6981}
6982EXPORT_SYMBOL(dev_set_group);
6983
f0db275a
SH
6984/**
6985 * dev_set_mac_address - Change Media Access Control Address
6986 * @dev: device
6987 * @sa: new address
6988 *
6989 * Change the hardware (MAC) address of the device
6990 */
1da177e4
LT
6991int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6992{
d314774c 6993 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
6994 int err;
6995
d314774c 6996 if (!ops->ndo_set_mac_address)
1da177e4
LT
6997 return -EOPNOTSUPP;
6998 if (sa->sa_family != dev->type)
6999 return -EINVAL;
7000 if (!netif_device_present(dev))
7001 return -ENODEV;
d314774c 7002 err = ops->ndo_set_mac_address(dev, sa);
f6521516
JP
7003 if (err)
7004 return err;
fbdeca2d 7005 dev->addr_assign_type = NET_ADDR_SET;
f6521516 7006 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7bf23575 7007 add_device_randomness(dev->dev_addr, dev->addr_len);
f6521516 7008 return 0;
1da177e4 7009}
d1b19dff 7010EXPORT_SYMBOL(dev_set_mac_address);
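
/*
 * Illustrative sketch, not part of this file: setting an Ethernet MAC via
 * the generic helper. sa_family must match dev->type, as checked above.
 * my_set_ether_addr() is hypothetical; the caller holds the RTNL lock.
 */
static int my_set_ether_addr(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;		/* ARPHRD_ETHER here */
	memcpy(sa.sa_data, mac, ETH_ALEN);
	return dev_set_mac_address(dev, &sa);
}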
1da177e4 7011
4bf84c35
JP
7012/**
7013 * dev_change_carrier - Change device carrier
7014 * @dev: device
691b3b7e 7015 * @new_carrier: new value
4bf84c35
JP
7016 *
7017 * Change device carrier
7018 */
7019int dev_change_carrier(struct net_device *dev, bool new_carrier)
7020{
7021 const struct net_device_ops *ops = dev->netdev_ops;
7022
7023 if (!ops->ndo_change_carrier)
7024 return -EOPNOTSUPP;
7025 if (!netif_device_present(dev))
7026 return -ENODEV;
7027 return ops->ndo_change_carrier(dev, new_carrier);
7028}
7029EXPORT_SYMBOL(dev_change_carrier);
7030
66b52b0d
JP
7031/**
7032 * dev_get_phys_port_id - Get device physical port ID
7033 * @dev: device
7034 * @ppid: port ID
7035 *
7036 * Get device physical port ID
7037 */
7038int dev_get_phys_port_id(struct net_device *dev,
02637fce 7039 struct netdev_phys_item_id *ppid)
66b52b0d
JP
7040{
7041 const struct net_device_ops *ops = dev->netdev_ops;
7042
7043 if (!ops->ndo_get_phys_port_id)
7044 return -EOPNOTSUPP;
7045 return ops->ndo_get_phys_port_id(dev, ppid);
7046}
7047EXPORT_SYMBOL(dev_get_phys_port_id);
7048
db24a904
DA
7049/**
7050 * dev_get_phys_port_name - Get device physical port name
7051 * @dev: device
7052 * @name: port name
ed49e650 7053 * @len: limit of bytes to copy to name
db24a904
DA
7054 *
7055 * Get device physical port name
7056 */
7057int dev_get_phys_port_name(struct net_device *dev,
7058 char *name, size_t len)
7059{
7060 const struct net_device_ops *ops = dev->netdev_ops;
7061
7062 if (!ops->ndo_get_phys_port_name)
7063 return -EOPNOTSUPP;
7064 return ops->ndo_get_phys_port_name(dev, name, len);
7065}
7066EXPORT_SYMBOL(dev_get_phys_port_name);
7067
d746d707
AK
7068/**
7069 * dev_change_proto_down - update protocol port state information
7070 * @dev: device
7071 * @proto_down: new value
7072 *
7073 * This info can be used by switch drivers to set the phys state of the
7074 * port.
7075 */
7076int dev_change_proto_down(struct net_device *dev, bool proto_down)
7077{
7078 const struct net_device_ops *ops = dev->netdev_ops;
7079
7080 if (!ops->ndo_change_proto_down)
7081 return -EOPNOTSUPP;
7082 if (!netif_device_present(dev))
7083 return -ENODEV;
7084 return ops->ndo_change_proto_down(dev, proto_down);
7085}
7086EXPORT_SYMBOL(dev_change_proto_down);
7087
f4e63525 7088u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
d67b9cd2 7089{
f4e63525 7090 struct netdev_bpf xdp;
d67b9cd2
DB
7091
7092 memset(&xdp, 0, sizeof(xdp));
7093 xdp.command = XDP_QUERY_PROG;
7094
7095 /* Query must always succeed. */
f4e63525 7096 WARN_ON(bpf_op(dev, &xdp) < 0);
58038695
MKL
7097 if (prog_id)
7098 *prog_id = xdp.prog_id;
7099
d67b9cd2
DB
7100 return xdp.prog_attached;
7101}
7102
f4e63525 7103static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
32d60277 7104 struct netlink_ext_ack *extack, u32 flags,
d67b9cd2
DB
7105 struct bpf_prog *prog)
7106{
f4e63525 7107 struct netdev_bpf xdp;
d67b9cd2
DB
7108
7109 memset(&xdp, 0, sizeof(xdp));
ee5d032f
JK
7110 if (flags & XDP_FLAGS_HW_MODE)
7111 xdp.command = XDP_SETUP_PROG_HW;
7112 else
7113 xdp.command = XDP_SETUP_PROG;
d67b9cd2 7114 xdp.extack = extack;
32d60277 7115 xdp.flags = flags;
d67b9cd2
DB
7116 xdp.prog = prog;
7117
f4e63525 7118 return bpf_op(dev, &xdp);
d67b9cd2
DB
7119}
7120
a7862b45
BB
7121/**
7122 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
7123 * @dev: device
b5d60989 7124 * @extack: netlink extended ack
a7862b45 7125 * @fd: new program fd or negative value to clear
85de8576 7126 * @flags: xdp-related flags
a7862b45
BB
7127 *
7128 * Set or clear a bpf program for a device
7129 */
ddf9f970
JK
7130int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7131 int fd, u32 flags)
a7862b45
BB
7132{
7133 const struct net_device_ops *ops = dev->netdev_ops;
7134 struct bpf_prog *prog = NULL;
f4e63525 7135 bpf_op_t bpf_op, bpf_chk;
a7862b45
BB
7136 int err;
7137
85de8576
DB
7138 ASSERT_RTNL();
7139
f4e63525
JK
7140 bpf_op = bpf_chk = ops->ndo_bpf;
7141 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
0489df9a 7142 return -EOPNOTSUPP;
f4e63525
JK
7143 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
7144 bpf_op = generic_xdp_install;
7145 if (bpf_op == bpf_chk)
7146 bpf_chk = generic_xdp_install;
b5cdae32 7147
a7862b45 7148 if (fd >= 0) {
f4e63525 7149 if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
d67b9cd2
DB
7150 return -EEXIST;
7151 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
f4e63525 7152 __dev_xdp_attached(dev, bpf_op, NULL))
d67b9cd2 7153 return -EBUSY;
85de8576 7154
288b3de5
JK
7155 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
7156 bpf_op == ops->ndo_bpf);
a7862b45
BB
7157 if (IS_ERR(prog))
7158 return PTR_ERR(prog);
441a3303
JK
7159
7160 if (!(flags & XDP_FLAGS_HW_MODE) &&
7161 bpf_prog_is_dev_bound(prog->aux)) {
7162 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
7163 bpf_prog_put(prog);
7164 return -EINVAL;
7165 }
a7862b45
BB
7166 }
7167
f4e63525 7168 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
a7862b45
BB
7169 if (err < 0 && prog)
7170 bpf_prog_put(prog);
7171
7172 return err;
7173}
a7862b45 7174
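/* Example (editorial sketch): attaching an XDP program in generic (skb)
 * mode, given a program fd obtained from userspace. The helper name and
 * extack handling are assumptions; dev_change_xdp_fd() asserts RTNL, so
 * the lock is taken here.
 */
#include <linux/if_link.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int example_attach_generic_xdp(struct net_device *dev, int prog_fd,
				      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_SKB_MODE);
	rtnl_unlock();
	return err;	/* pass a negative fd to detach instead */
}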
1da177e4
LT
7175/**
7176 * dev_new_index - allocate an ifindex
c4ea43c5 7177 * @net: the applicable net namespace
1da177e4
LT
7178 *
7179 * Returns a suitable unique value for a new device interface
7180 * number. The caller must hold the rtnl semaphore or the
7181 * dev_base_lock to be sure it remains unique.
7182 */
881d966b 7183static int dev_new_index(struct net *net)
1da177e4 7184{
aa79e66e 7185 int ifindex = net->ifindex;
f4563a75 7186
1da177e4
LT
7187 for (;;) {
7188 if (++ifindex <= 0)
7189 ifindex = 1;
881d966b 7190 if (!__dev_get_by_index(net, ifindex))
aa79e66e 7191 return net->ifindex = ifindex;
1da177e4
LT
7192 }
7193}
7194
1da177e4 7195/* Delayed registration/unregisteration */
3b5b34fd 7196static LIST_HEAD(net_todo_list);
200b916f 7197DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
1da177e4 7198
6f05f629 7199static void net_set_todo(struct net_device *dev)
1da177e4 7200{
1da177e4 7201 list_add_tail(&dev->todo_list, &net_todo_list);
50624c93 7202 dev_net(dev)->dev_unreg_count++;
1da177e4
LT
7203}
7204
9b5e383c 7205static void rollback_registered_many(struct list_head *head)
93ee31f1 7206{
e93737b0 7207 struct net_device *dev, *tmp;
5cde2829 7208 LIST_HEAD(close_head);
9b5e383c 7209
93ee31f1
DL
7210 BUG_ON(dev_boot_phase);
7211 ASSERT_RTNL();
7212
e93737b0 7213 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 7214 /* Some devices call without registering
e93737b0
KK
7215 * for initialization unwind. Remove those
7216 * devices and proceed with the remaining.
9b5e383c
ED
7217 */
7218 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
7219 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7220 dev->name, dev);
93ee31f1 7221
9b5e383c 7222 WARN_ON(1);
e93737b0
KK
7223 list_del(&dev->unreg_list);
7224 continue;
9b5e383c 7225 }
449f4544 7226 dev->dismantle = true;
9b5e383c 7227 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 7228 }
93ee31f1 7229
44345724 7230 /* If device is running, close it first. */
5cde2829
EB
7231 list_for_each_entry(dev, head, unreg_list)
7232 list_add_tail(&dev->close_list, &close_head);
99c4a26a 7233 dev_close_many(&close_head, true);
93ee31f1 7234
44345724 7235 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
7236 /* And unlink it from device chain. */
7237 unlist_netdevice(dev);
93ee31f1 7238
9b5e383c
ED
7239 dev->reg_state = NETREG_UNREGISTERING;
7240 }
41852497 7241 flush_all_backlogs();
93ee31f1
DL
7242
7243 synchronize_net();
7244
9b5e383c 7245 list_for_each_entry(dev, head, unreg_list) {
395eea6c
MB
7246 struct sk_buff *skb = NULL;
7247
9b5e383c
ED
7248 /* Shutdown queueing discipline. */
7249 dev_shutdown(dev);
93ee31f1
DL
7250
7251
9b5e383c 7252 /* Notify protocols that we are about to destroy
eb13da1a 7253 * this device. They should clean all the things.
7254 */
9b5e383c 7255 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 7256
395eea6c
MB
7257 if (!dev->rtnl_link_ops ||
7258 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
3d3ea5af 7259 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
6621dd29 7260 GFP_KERNEL, NULL);
395eea6c 7261
9b5e383c
ED
7262 /*
7263 * Flush the unicast and multicast chains
7264 */
a748ee24 7265 dev_uc_flush(dev);
22bedad3 7266 dev_mc_flush(dev);
93ee31f1 7267
9b5e383c
ED
7268 if (dev->netdev_ops->ndo_uninit)
7269 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 7270
395eea6c
MB
7271 if (skb)
7272 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
56bfa7ee 7273
9ff162a8
JP
7274 /* Notifier chain MUST detach all upper devices from us. */
7275 WARN_ON(netdev_has_any_upper_dev(dev));
0f524a80 7276 WARN_ON(netdev_has_any_lower_dev(dev));
93ee31f1 7277
9b5e383c
ED
7278 /* Remove entries from kobject tree */
7279 netdev_unregister_kobject(dev);
024e9679
AD
7280#ifdef CONFIG_XPS
7281 /* Remove XPS queueing entries */
7282 netif_reset_xps_queues_gt(dev, 0);
7283#endif
9b5e383c 7284 }
93ee31f1 7285
850a545b 7286 synchronize_net();
395264d5 7287
a5ee1551 7288 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
7289 dev_put(dev);
7290}
7291
7292static void rollback_registered(struct net_device *dev)
7293{
7294 LIST_HEAD(single);
7295
7296 list_add(&dev->unreg_list, &single);
7297 rollback_registered_many(&single);
ceaaec98 7298 list_del(&single);
93ee31f1
DL
7299}
7300
fd867d51
JW
7301static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7302 struct net_device *upper, netdev_features_t features)
7303{
7304 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7305 netdev_features_t feature;
5ba3f7d6 7306 int feature_bit;
fd867d51 7307
5ba3f7d6
JW
7308 for_each_netdev_feature(&upper_disables, feature_bit) {
7309 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
7310 if (!(upper->wanted_features & feature)
7311 && (features & feature)) {
7312 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7313 &feature, upper->name);
7314 features &= ~feature;
7315 }
7316 }
7317
7318 return features;
7319}
7320
7321static void netdev_sync_lower_features(struct net_device *upper,
7322 struct net_device *lower, netdev_features_t features)
7323{
7324 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7325 netdev_features_t feature;
5ba3f7d6 7326 int feature_bit;
fd867d51 7327
5ba3f7d6
JW
7328 for_each_netdev_feature(&upper_disables, feature_bit) {
7329 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
7330 if (!(features & feature) && (lower->features & feature)) {
7331 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7332 &feature, lower->name);
7333 lower->wanted_features &= ~feature;
7334 netdev_update_features(lower);
7335
7336 if (unlikely(lower->features & feature))
7337 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7338 &feature, lower->name);
7339 }
7340 }
7341}
7342
c8f44aff
MM
7343static netdev_features_t netdev_fix_features(struct net_device *dev,
7344 netdev_features_t features)
b63365a2 7345{
57422dc5
MM
7346 /* Fix illegal checksum combinations */
7347 if ((features & NETIF_F_HW_CSUM) &&
7348 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 7349 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
7350 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7351 }
7352
b63365a2 7353 /* TSO requires that SG is present as well. */
ea2d3688 7354 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 7355 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 7356 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
7357 }
7358
ec5f0615
PS
7359 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7360 !(features & NETIF_F_IP_CSUM)) {
7361 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7362 features &= ~NETIF_F_TSO;
7363 features &= ~NETIF_F_TSO_ECN;
7364 }
7365
7366 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7367 !(features & NETIF_F_IPV6_CSUM)) {
7368 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7369 features &= ~NETIF_F_TSO6;
7370 }
7371
b1dc497b
AD
7372 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7373 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7374 features &= ~NETIF_F_TSO_MANGLEID;
7375
31d8b9e0
BH
7376 /* TSO ECN requires that TSO is present as well. */
7377 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7378 features &= ~NETIF_F_TSO_ECN;
7379
212b573f
MM
7380 /* Software GSO depends on SG. */
7381 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 7382 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
7383 features &= ~NETIF_F_GSO;
7384 }
7385
802ab55a
AD
7386 /* GSO partial features require GSO partial be set */
7387 if ((features & dev->gso_partial_features) &&
7388 !(features & NETIF_F_GSO_PARTIAL)) {
7389 netdev_dbg(dev,
7390 "Dropping partially supported GSO features since no GSO partial.\n");
7391 features &= ~dev->gso_partial_features;
7392 }
7393
b63365a2
HX
7394 return features;
7395}
b63365a2 7396
6cb6a27c 7397int __netdev_update_features(struct net_device *dev)
5455c699 7398{
fd867d51 7399 struct net_device *upper, *lower;
c8f44aff 7400 netdev_features_t features;
fd867d51 7401 struct list_head *iter;
e7868a85 7402 int err = -1;
5455c699 7403
87267485
MM
7404 ASSERT_RTNL();
7405
5455c699
MM
7406 features = netdev_get_wanted_features(dev);
7407
7408 if (dev->netdev_ops->ndo_fix_features)
7409 features = dev->netdev_ops->ndo_fix_features(dev, features);
7410
7411 /* driver might be less strict about feature dependencies */
7412 features = netdev_fix_features(dev, features);
7413
fd867d51
JW
7414 /* some features can't be enabled if they're off on an upper device */
7415 netdev_for_each_upper_dev_rcu(dev, upper, iter)
7416 features = netdev_sync_upper_features(dev, upper, features);
7417
5455c699 7418 if (dev->features == features)
e7868a85 7419 goto sync_lower;
5455c699 7420
c8f44aff
MM
7421 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7422 &dev->features, &features);
5455c699
MM
7423
7424 if (dev->netdev_ops->ndo_set_features)
7425 err = dev->netdev_ops->ndo_set_features(dev, features);
5f8dc33e
NA
7426 else
7427 err = 0;
5455c699 7428
6cb6a27c 7429 if (unlikely(err < 0)) {
5455c699 7430 netdev_err(dev,
c8f44aff
MM
7431 "set_features() failed (%d); wanted %pNF, left %pNF\n",
7432 err, &features, &dev->features);
17b85d29
NA
7433 /* return non-0 since some features might have changed and
7434 * it's better to fire a spurious notification than miss it
7435 */
7436 return -1;
6cb6a27c
MM
7437 }
7438
e7868a85 7439sync_lower:
fd867d51
JW
7440 /* some features must be disabled on lower devices when disabled
7441 * on an upper device (think: bonding master or bridge)
7442 */
7443 netdev_for_each_lower_dev(dev, lower, iter)
7444 netdev_sync_lower_features(dev, lower, features);
7445
ae847f40
SD
7446 if (!err) {
7447 netdev_features_t diff = features ^ dev->features;
7448
7449 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
7450 /* udp_tunnel_{get,drop}_rx_info both need
7451 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
7452 * device, or they won't do anything.
7453 * Thus we need to update dev->features
7454 * *before* calling udp_tunnel_get_rx_info,
7455 * but *after* calling udp_tunnel_drop_rx_info.
7456 */
7457 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
7458 dev->features = features;
7459 udp_tunnel_get_rx_info(dev);
7460 } else {
7461 udp_tunnel_drop_rx_info(dev);
7462 }
7463 }
7464
6cb6a27c 7465 dev->features = features;
ae847f40 7466 }
6cb6a27c 7467
e7868a85 7468 return err < 0 ? 0 : 1;
6cb6a27c
MM
7469}
7470
afe12cc8
MM
7471/**
7472 * netdev_update_features - recalculate device features
7473 * @dev: the device to check
7474 *
7475 * Recalculate dev->features set and send notifications if it
7476 * has changed. Should be called after driver or hardware dependent
7477 * conditions might have changed that influence the features.
7478 */
6cb6a27c
MM
7479void netdev_update_features(struct net_device *dev)
7480{
7481 if (__netdev_update_features(dev))
7482 netdev_features_change(dev);
5455c699
MM
7483}
7484EXPORT_SYMBOL(netdev_update_features);
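/* Example (editorial sketch): how a driver might react to losing a
 * hardware capability at run time, e.g. after a firmware event. The
 * hook name is hypothetical; netdev_update_features() requires RTNL and
 * re-runs the fix-up and upper/lower sync logic above.
 */
static void example_fw_lost_csum_offload(struct net_device *dev)
{
	rtnl_lock();
	dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	netdev_update_features(dev);	/* recompute dev->features, notify */
	rtnl_unlock();
}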
7485
afe12cc8
MM
7486/**
7487 * netdev_change_features - recalculate device features
7488 * @dev: the device to check
7489 *
7490 * Recalculate dev->features set and send notifications even
7491 * if they have not changed. Should be called instead of
7492 * netdev_update_features() if also dev->vlan_features might
7493 * have changed to allow the changes to be propagated to stacked
7494 * VLAN devices.
7495 */
7496void netdev_change_features(struct net_device *dev)
7497{
7498 __netdev_update_features(dev);
7499 netdev_features_change(dev);
7500}
7501EXPORT_SYMBOL(netdev_change_features);
7502
fc4a7489
PM
7503/**
7504 * netif_stacked_transfer_operstate - transfer operstate
7505 * @rootdev: the root or lower level device to transfer state from
7506 * @dev: the device to transfer operstate to
7507 *
7508 * Transfer operational state from root to device. This is normally
7509 * called when a stacking relationship exists between the root
7510 * device and the device (a leaf device).
7511 */
7512void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7513 struct net_device *dev)
7514{
7515 if (rootdev->operstate == IF_OPER_DORMANT)
7516 netif_dormant_on(dev);
7517 else
7518 netif_dormant_off(dev);
7519
0575c86b
ZS
7520 if (netif_carrier_ok(rootdev))
7521 netif_carrier_on(dev);
7522 else
7523 netif_carrier_off(dev);
fc4a7489
PM
7524}
7525EXPORT_SYMBOL(netif_stacked_transfer_operstate);
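/* Example (editorial sketch): a stacked (VLAN-like) driver mirroring its
 * lower device's operstate from a netdevice notifier. The upper-device
 * lookup helper is hypothetical; netdev_notifier_info_to_dev() is real.
 */
static struct net_device *example_find_upper(struct net_device *lower); /* hypothetical */

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper;

	if (event != NETDEV_CHANGE)
		return NOTIFY_DONE;
	upper = example_find_upper(lower);
	if (upper)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}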
7526
a953be53 7527#ifdef CONFIG_SYSFS
1b4bf461
ED
7528static int netif_alloc_rx_queues(struct net_device *dev)
7529{
1b4bf461 7530 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 7531 struct netdev_rx_queue *rx;
10595902 7532 size_t sz = count * sizeof(*rx);
1b4bf461 7533
bd25fa7b 7534 BUG_ON(count < 1);
1b4bf461 7535
dcda9b04 7536 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
da6bc57a
MH
7537 if (!rx)
7538 return -ENOMEM;
7539
bd25fa7b
TH
7540 dev->_rx = rx;
7541
bd25fa7b 7542 for (i = 0; i < count; i++)
fe822240 7543 rx[i].dev = dev;
1b4bf461
ED
7544 return 0;
7545}
bf264145 7546#endif
1b4bf461 7547
aa942104
CG
7548static void netdev_init_one_queue(struct net_device *dev,
7549 struct netdev_queue *queue, void *_unused)
7550{
7551 /* Initialize queue lock */
7552 spin_lock_init(&queue->_xmit_lock);
7553 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7554 queue->xmit_lock_owner = -1;
b236da69 7555 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 7556 queue->dev = dev;
114cf580
TH
7557#ifdef CONFIG_BQL
7558 dql_init(&queue->dql, HZ);
7559#endif
aa942104
CG
7560}
7561
60877a32
ED
7562static void netif_free_tx_queues(struct net_device *dev)
7563{
4cb28970 7564 kvfree(dev->_tx);
60877a32
ED
7565}
7566
e6484930
TH
7567static int netif_alloc_netdev_queues(struct net_device *dev)
7568{
7569 unsigned int count = dev->num_tx_queues;
7570 struct netdev_queue *tx;
60877a32 7571 size_t sz = count * sizeof(*tx);
e6484930 7572
d339727c
ED
7573 if (count < 1 || count > 0xffff)
7574 return -EINVAL;
62b5942a 7575
dcda9b04 7576 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
da6bc57a
MH
7577 if (!tx)
7578 return -ENOMEM;
7579
e6484930 7580 dev->_tx = tx;
1d24eb48 7581
e6484930
TH
7582 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7583 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
7584
7585 return 0;
e6484930
TH
7586}
7587
a2029240
DV
7588void netif_tx_stop_all_queues(struct net_device *dev)
7589{
7590 unsigned int i;
7591
7592 for (i = 0; i < dev->num_tx_queues; i++) {
7593 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
f4563a75 7594
a2029240
DV
7595 netif_tx_stop_queue(txq);
7596 }
7597}
7598EXPORT_SYMBOL(netif_tx_stop_all_queues);
7599
1da177e4
LT
7600/**
7601 * register_netdevice - register a network device
7602 * @dev: device to register
7603 *
7604 * Take a completed network device structure and add it to the kernel
7605 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7606 * chain. 0 is returned on success. A negative errno code is returned
7607 * on a failure to set up the device, or if the name is a duplicate.
7608 *
7609 * Callers must hold the rtnl semaphore. You may want
7610 * register_netdev() instead of this.
7611 *
7612 * BUGS:
7613 * The locking appears insufficient to guarantee two parallel registers
7614 * will not get the same name.
7615 */
7616
7617int register_netdevice(struct net_device *dev)
7618{
1da177e4 7619 int ret;
d314774c 7620 struct net *net = dev_net(dev);
1da177e4
LT
7621
7622 BUG_ON(dev_boot_phase);
7623 ASSERT_RTNL();
7624
b17a7c17
SH
7625 might_sleep();
7626
1da177e4
LT
7627 /* When net_devices are persistent, this will be fatal. */
7628 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 7629 BUG_ON(!net);
1da177e4 7630
f1f28aa3 7631 spin_lock_init(&dev->addr_list_lock);
cf508b12 7632 netdev_set_addr_lockdep_class(dev);
1da177e4 7633
828de4f6 7634 ret = dev_get_valid_name(net, dev, dev->name);
0696c3a8
PP
7635 if (ret < 0)
7636 goto out;
7637
1da177e4 7638 /* Init, if this function is available */
d314774c
SH
7639 if (dev->netdev_ops->ndo_init) {
7640 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
7641 if (ret) {
7642 if (ret > 0)
7643 ret = -EIO;
90833aa4 7644 goto out;
1da177e4
LT
7645 }
7646 }
4ec93edb 7647
f646968f
PM
7648 if (((dev->hw_features | dev->features) &
7649 NETIF_F_HW_VLAN_CTAG_FILTER) &&
d2ed273d
MM
7650 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7651 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7652 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7653 ret = -EINVAL;
7654 goto err_uninit;
7655 }
7656
9c7dafbf
PE
7657 ret = -EBUSY;
7658 if (!dev->ifindex)
7659 dev->ifindex = dev_new_index(net);
7660 else if (__dev_get_by_index(net, dev->ifindex))
7661 goto err_uninit;
7662
5455c699
MM
7663 /* Transfer changeable features to wanted_features and enable
7664 * software offloads (GSO and GRO).
7665 */
7666 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f 7667 dev->features |= NETIF_F_SOFT_FEATURES;
d764a122
SD
7668
7669 if (dev->netdev_ops->ndo_udp_tunnel_add) {
7670 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7671 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7672 }
7673
14d1232f 7674 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 7675
cbc53e08 7676 if (!(dev->flags & IFF_LOOPBACK))
34324dc2 7677 dev->hw_features |= NETIF_F_NOCACHE_COPY;
cbc53e08 7678
7f348a60
AD
7679 /* If IPv4 TCP segmentation offload is supported we should also
7680 * allow the device to enable segmenting the frame with the option
7681 * of ignoring a static IP ID value. This doesn't enable the
7682 * feature itself but allows the user to enable it later.
7683 */
cbc53e08
AD
7684 if (dev->hw_features & NETIF_F_TSO)
7685 dev->hw_features |= NETIF_F_TSO_MANGLEID;
7f348a60
AD
7686 if (dev->vlan_features & NETIF_F_TSO)
7687 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7688 if (dev->mpls_features & NETIF_F_TSO)
7689 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7690 if (dev->hw_enc_features & NETIF_F_TSO)
7691 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
c6e1a0d1 7692
1180e7d6 7693 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 7694 */
1180e7d6 7695 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 7696
ee579677
PS
7697 /* Make NETIF_F_SG inheritable to tunnel devices.
7698 */
802ab55a 7699 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
ee579677 7700
0d89d203
SH
7701 /* Make NETIF_F_SG inheritable to MPLS.
7702 */
7703 dev->mpls_features |= NETIF_F_SG;
7704
7ffbe3fd
JB
7705 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7706 ret = notifier_to_errno(ret);
7707 if (ret)
7708 goto err_uninit;
7709
8b41d188 7710 ret = netdev_register_kobject(dev);
b17a7c17 7711 if (ret)
7ce1b0ed 7712 goto err_uninit;
b17a7c17
SH
7713 dev->reg_state = NETREG_REGISTERED;
7714
6cb6a27c 7715 __netdev_update_features(dev);
8e9b59b2 7716
1da177e4
LT
7717 /*
7718 * Default initial state at registration is that the
7719 * device is present.
7720 */
7721
7722 set_bit(__LINK_STATE_PRESENT, &dev->state);
7723
8f4cccbb
BH
7724 linkwatch_init_dev(dev);
7725
1da177e4 7726 dev_init_scheduler(dev);
1da177e4 7727 dev_hold(dev);
ce286d32 7728 list_netdevice(dev);
7bf23575 7729 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 7730
948b337e
JP
7731 /* If the device has permanent device address, driver should
7732 * set dev_addr and also addr_assign_type should be set to
7733 * NET_ADDR_PERM (default value).
7734 */
7735 if (dev->addr_assign_type == NET_ADDR_PERM)
7736 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7737
1da177e4 7738 /* Notify protocols that a new device appeared. */
056925ab 7739 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 7740 ret = notifier_to_errno(ret);
93ee31f1
DL
7741 if (ret) {
7742 rollback_registered(dev);
7743 dev->reg_state = NETREG_UNREGISTERED;
7744 }
d90a909e
EB
7745 /*
7746 * Prevent userspace races by waiting until the network
7747 * device is fully set up before sending notifications.
7748 */
a2835763
PM
7749 if (!dev->rtnl_link_ops ||
7750 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 7751 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
1da177e4
LT
7752
7753out:
7754 return ret;
7ce1b0ed
HX
7755
7756err_uninit:
d314774c
SH
7757 if (dev->netdev_ops->ndo_uninit)
7758 dev->netdev_ops->ndo_uninit(dev);
cf124db5
DM
7759 if (dev->priv_destructor)
7760 dev->priv_destructor(dev);
7ce1b0ed 7761 goto out;
1da177e4 7762}
d1b19dff 7763EXPORT_SYMBOL(register_netdevice);
1da177e4 7764
937f1ba5
BH
7765/**
7766 * init_dummy_netdev - init a dummy network device for NAPI
7767 * @dev: device to init
7768 *
7769 * This takes a network device structure and initializes the minimum
7770 * set of fields so it can be used to schedule NAPI polls without
7771 * registering a full-blown interface. This is to be used by drivers
7772 * that need to tie several hardware interfaces to a single NAPI
7773 * poll scheduler due to HW limitations.
7774 */
7775int init_dummy_netdev(struct net_device *dev)
7776{
7777 /* Clear everything. Note we don't initialize spinlocks
7778 * as they aren't supposed to be taken by any of the
7779 * NAPI code and this dummy netdev is supposed to be
7780 * only ever used for NAPI polls
7781 */
7782 memset(dev, 0, sizeof(struct net_device));
7783
7784 /* make sure we BUG if trying to hit standard
7785 * register/unregister code path
7786 */
7787 dev->reg_state = NETREG_DUMMY;
7788
937f1ba5
BH
7789 /* NAPI wants this */
7790 INIT_LIST_HEAD(&dev->napi_list);
7791
7792 /* a dummy interface is started by default */
7793 set_bit(__LINK_STATE_PRESENT, &dev->state);
7794 set_bit(__LINK_STATE_START, &dev->state);
7795
29b4433d
ED
7796 /* Note: We don't allocate pcpu_refcnt for dummy devices,
7797 * because users of this 'device' don't need to change
7798 * its refcount.
7799 */
7800
937f1ba5
BH
7801 return 0;
7802}
7803EXPORT_SYMBOL_GPL(init_dummy_netdev);
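/* Example (editorial sketch): using a dummy netdev purely as a NAPI
 * anchor for hardware with several receive engines behind one function.
 * struct example_hw and example_poll are hypothetical; some real drivers
 * embed such a never-registered net_device by value.
 */
struct example_hw {
	struct net_device napi_dev;	/* never registered, NAPI only */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget); /* hypothetical */

static void example_hw_init(struct example_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll,
		       NAPI_POLL_WEIGHT);
}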
7804
7805
1da177e4
LT
7806/**
7807 * register_netdev - register a network device
7808 * @dev: device to register
7809 *
7810 * Take a completed network device structure and add it to the kernel
7811 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7812 * chain. 0 is returned on success. A negative errno code is returned
7813 * on a failure to set up the device, or if the name is a duplicate.
7814 *
38b4da38 7815 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
7816 * and expands the device name if you passed a format string to
7817 * alloc_netdev.
7818 */
7819int register_netdev(struct net_device *dev)
7820{
7821 int err;
7822
7823 rtnl_lock();
1da177e4 7824 err = register_netdevice(dev);
1da177e4
LT
7825 rtnl_unlock();
7826 return err;
7827}
7828EXPORT_SYMBOL(register_netdev);
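/* Example (editorial sketch): the canonical life cycle around
 * register_netdev() for a simple virtual Ethernet-like device. The name
 * template, setup callback and empty ops are hypothetical; a real driver
 * must at least provide ndo_start_xmit before the device is brought up.
 */
#include <linux/etherdevice.h>
#include <linux/module.h>

static const struct net_device_ops example_ops = { /* no hooks: sketch only */ };
static struct net_device *example_dev;

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);			/* Ethernet defaults */
	dev->netdev_ops = &example_ops;
	dev->needs_free_netdev = true;		/* core frees it after unregister */
}

static int __init example_init(void)
{
	int err;

	example_dev = alloc_netdev(0, "example%d", NET_NAME_UNKNOWN,
				   example_setup);
	if (!example_dev)
		return -ENOMEM;
	err = register_netdev(example_dev);	/* takes RTNL internally */
	if (err)
		free_netdev(example_dev);	/* device never became visible */
	return err;
}

static void __exit example_exit(void)
{
	unregister_netdev(example_dev);		/* final free via the todo list */
}
module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");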
7829
29b4433d
ED
7830int netdev_refcnt_read(const struct net_device *dev)
7831{
7832 int i, refcnt = 0;
7833
7834 for_each_possible_cpu(i)
7835 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7836 return refcnt;
7837}
7838EXPORT_SYMBOL(netdev_refcnt_read);
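/* Example (editorial sketch): taking a reference and observing the
 * per-cpu refcount. Long-term holders should also listen for
 * NETDEV_UNREGISTER and drop their reference, so that
 * netdev_wait_allrefs() below can make progress.
 */
static void example_hold_briefly(struct net_device *dev)
{
	dev_hold(dev);			/* per-cpu increment, very cheap */
	pr_info("%s: refcnt now %d\n", dev->name, netdev_refcnt_read(dev));
	dev_put(dev);
}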
7839
2c53040f 7840/**
1da177e4 7841 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 7842 * @dev: target net_device
1da177e4
LT
7843 *
7844 * This is called when unregistering network devices.
7845 *
7846 * Any protocol or device that holds a reference should register
7847 * for netdevice notification, and cleanup and put back the
7848 * reference if they receive an UNREGISTER event.
7849 * We can get stuck here if buggy protocols don't correctly
4ec93edb 7850 * call dev_put.
1da177e4
LT
7851 */
7852static void netdev_wait_allrefs(struct net_device *dev)
7853{
7854 unsigned long rebroadcast_time, warning_time;
29b4433d 7855 int refcnt;
1da177e4 7856
e014debe
ED
7857 linkwatch_forget_dev(dev);
7858
1da177e4 7859 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
7860 refcnt = netdev_refcnt_read(dev);
7861
7862 while (refcnt != 0) {
1da177e4 7863 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 7864 rtnl_lock();
1da177e4
LT
7865
7866 /* Rebroadcast unregister notification */
056925ab 7867 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 7868
748e2d93 7869 __rtnl_unlock();
0115e8e3 7870 rcu_barrier();
748e2d93
ED
7871 rtnl_lock();
7872
0115e8e3 7873 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
1da177e4
LT
7874 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7875 &dev->state)) {
7876 /* We must not have linkwatch events
7877 * pending on unregister. If this
7878 * happens, we simply run the queue
7879 * unscheduled, resulting in a noop
7880 * for this device.
7881 */
7882 linkwatch_run_queue();
7883 }
7884
6756ae4b 7885 __rtnl_unlock();
1da177e4
LT
7886
7887 rebroadcast_time = jiffies;
7888 }
7889
7890 msleep(250);
7891
29b4433d
ED
7892 refcnt = netdev_refcnt_read(dev);
7893
1da177e4 7894 if (time_after(jiffies, warning_time + 10 * HZ)) {
7b6cd1ce
JP
7895 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7896 dev->name, refcnt);
1da177e4
LT
7897 warning_time = jiffies;
7898 }
7899 }
7900}
7901
7902/* The sequence is:
7903 *
7904 * rtnl_lock();
7905 * ...
7906 * register_netdevice(x1);
7907 * register_netdevice(x2);
7908 * ...
7909 * unregister_netdevice(y1);
7910 * unregister_netdevice(y2);
7911 * ...
7912 * rtnl_unlock();
7913 * free_netdev(y1);
7914 * free_netdev(y2);
7915 *
58ec3b4d 7916 * We are invoked by rtnl_unlock().
1da177e4 7917 * This allows us to deal with problems:
b17a7c17 7918 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
7919 * without deadlocking with linkwatch via keventd.
7920 * 2) Since we run with the RTNL semaphore not held, we can sleep
7921 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
7922 *
7923 * We must not return until all unregister events added during
7924 * the interval the lock was held have been completed.
1da177e4 7925 */
1da177e4
LT
7926void netdev_run_todo(void)
7927{
626ab0e6 7928 struct list_head list;
1da177e4 7929
1da177e4 7930 /* Snapshot list, allow later requests */
626ab0e6 7931 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
7932
7933 __rtnl_unlock();
626ab0e6 7934
0115e8e3
ED
7935
7936 /* Wait for rcu callbacks to finish before next phase */
850a545b
EB
7937 if (!list_empty(&list))
7938 rcu_barrier();
7939
1da177e4
LT
7940 while (!list_empty(&list)) {
7941 struct net_device *dev
e5e26d75 7942 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
7943 list_del(&dev->todo_list);
7944
748e2d93 7945 rtnl_lock();
0115e8e3 7946 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
748e2d93 7947 __rtnl_unlock();
0115e8e3 7948
b17a7c17 7949 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 7950 pr_err("network todo '%s' but state %d\n",
b17a7c17
SH
7951 dev->name, dev->reg_state);
7952 dump_stack();
7953 continue;
7954 }
1da177e4 7955
b17a7c17 7956 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 7957
b17a7c17 7958 netdev_wait_allrefs(dev);
1da177e4 7959
b17a7c17 7960 /* paranoia */
29b4433d 7961 BUG_ON(netdev_refcnt_read(dev));
7866a621
SN
7962 BUG_ON(!list_empty(&dev->ptype_all));
7963 BUG_ON(!list_empty(&dev->ptype_specific));
33d480ce
ED
7964 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7965 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
547b792c 7966 WARN_ON(dev->dn_ptr);
1da177e4 7967
cf124db5
DM
7968 if (dev->priv_destructor)
7969 dev->priv_destructor(dev);
7970 if (dev->needs_free_netdev)
7971 free_netdev(dev);
9093bbb2 7972
50624c93
EB
7973 /* Report a network device has been unregistered */
7974 rtnl_lock();
7975 dev_net(dev)->dev_unreg_count--;
7976 __rtnl_unlock();
7977 wake_up(&netdev_unregistering_wq);
7978
9093bbb2
SH
7979 /* Free network device */
7980 kobject_put(&dev->dev.kobj);
1da177e4 7981 }
1da177e4
LT
7982}
7983
9256645a
JW
7984/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7985 * all the same fields in the same order as net_device_stats, with only
7986 * the type differing, but rtnl_link_stats64 may have additional fields
7987 * at the end for newer counters.
3cfde79c 7988 */
77a1abf5
ED
7989void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7990 const struct net_device_stats *netdev_stats)
3cfde79c
BH
7991{
7992#if BITS_PER_LONG == 64
9256645a 7993 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
9af9959e 7994 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
9256645a
JW
7995 /* zero out counters that only exist in rtnl_link_stats64 */
7996 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7997 sizeof(*stats64) - sizeof(*netdev_stats));
3cfde79c 7998#else
9256645a 7999 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
3cfde79c
BH
8000 const unsigned long *src = (const unsigned long *)netdev_stats;
8001 u64 *dst = (u64 *)stats64;
8002
9256645a 8003 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
3cfde79c
BH
8004 for (i = 0; i < n; i++)
8005 dst[i] = src[i];
9256645a
JW
8006 /* zero out counters that only exist in rtnl_link_stats64 */
8007 memset((char *)stats64 + n * sizeof(u64), 0,
8008 sizeof(*stats64) - n * sizeof(u64));
3cfde79c
BH
8009#endif
8010}
77a1abf5 8011EXPORT_SYMBOL(netdev_stats_to_stats64);
3cfde79c 8012
eeda3fd6
SH
8013/**
8014 * dev_get_stats - get network device statistics
8015 * @dev: device to get statistics from
28172739 8016 * @storage: place to store stats
eeda3fd6 8017 *
d7753516
BH
8018 * Get network statistics from device. Return @storage.
8019 * The device driver may provide its own method by setting
8020 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
8021 * otherwise the internal statistics structure is used.
eeda3fd6 8022 */
d7753516
BH
8023struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
8024 struct rtnl_link_stats64 *storage)
7004bf25 8025{
eeda3fd6
SH
8026 const struct net_device_ops *ops = dev->netdev_ops;
8027
28172739
ED
8028 if (ops->ndo_get_stats64) {
8029 memset(storage, 0, sizeof(*storage));
caf586e5
ED
8030 ops->ndo_get_stats64(dev, storage);
8031 } else if (ops->ndo_get_stats) {
3cfde79c 8032 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
8033 } else {
8034 netdev_stats_to_stats64(storage, &dev->stats);
28172739 8035 }
6f64ec74
ED
8036 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
8037 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
8038 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
28172739 8039 return storage;
c45d286e 8040}
eeda3fd6 8041EXPORT_SYMBOL(dev_get_stats);
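/* Example (editorial sketch): the driver half of the scheme above, an
 * ndo_get_stats64 implementation that dev_get_stats() calls with a
 * pre-zeroed buffer. struct example_priv and its counters are
 * hypothetical.
 */
struct example_priv {
	u64 rx_packets, rx_bytes;
	u64 tx_packets, tx_bytes;
};

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *storage)
{
	const struct example_priv *p = netdev_priv(dev);

	/* storage was memset to zero by dev_get_stats() before this call */
	storage->rx_packets = p->rx_packets;
	storage->rx_bytes   = p->rx_bytes;
	storage->tx_packets = p->tx_packets;
	storage->tx_bytes   = p->tx_bytes;
}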
c45d286e 8042
24824a09 8043struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 8044{
24824a09 8045 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 8046
24824a09
ED
8047#ifdef CONFIG_NET_CLS_ACT
8048 if (queue)
8049 return queue;
8050 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
8051 if (!queue)
8052 return NULL;
8053 netdev_init_one_queue(dev, queue, NULL);
2ce1ee17 8054 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
24824a09
ED
8055 queue->qdisc_sleeping = &noop_qdisc;
8056 rcu_assign_pointer(dev->ingress_queue, queue);
8057#endif
8058 return queue;
bb949fbd
DM
8059}
8060
2c60db03
ED
8061static const struct ethtool_ops default_ethtool_ops;
8062
d07d7507
SG
8063void netdev_set_default_ethtool_ops(struct net_device *dev,
8064 const struct ethtool_ops *ops)
8065{
8066 if (dev->ethtool_ops == &default_ethtool_ops)
8067 dev->ethtool_ops = ops;
8068}
8069EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
8070
74d332c1
ED
8071void netdev_freemem(struct net_device *dev)
8072{
8073 char *addr = (char *)dev - dev->padded;
8074
4cb28970 8075 kvfree(addr);
74d332c1
ED
8076}
8077
1da177e4 8078/**
722c9a0c 8079 * alloc_netdev_mqs - allocate network device
8080 * @sizeof_priv: size of private data to allocate space for
8081 * @name: device name format string
8082 * @name_assign_type: origin of device name
8083 * @setup: callback to initialize device
8084 * @txqs: the number of TX subqueues to allocate
8085 * @rxqs: the number of RX subqueues to allocate
8086 *
8087 * Allocates a struct net_device with private data area for driver use
8088 * and performs basic initialization. Also allocates subqueue structs
8089 * for each queue on the device.
1da177e4 8090 */
36909ea4 8091struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
c835a677 8092 unsigned char name_assign_type,
36909ea4
TH
8093 void (*setup)(struct net_device *),
8094 unsigned int txqs, unsigned int rxqs)
1da177e4 8095{
1da177e4 8096 struct net_device *dev;
52a59bd5 8097 unsigned int alloc_size;
1ce8e7b5 8098 struct net_device *p;
1da177e4 8099
b6fe17d6
SH
8100 BUG_ON(strlen(name) >= sizeof(dev->name));
8101
36909ea4 8102 if (txqs < 1) {
7b6cd1ce 8103 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
8104 return NULL;
8105 }
8106
a953be53 8107#ifdef CONFIG_SYSFS
36909ea4 8108 if (rxqs < 1) {
7b6cd1ce 8109 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
8110 return NULL;
8111 }
8112#endif
8113
fd2ea0a7 8114 alloc_size = sizeof(struct net_device);
d1643d24
AD
8115 if (sizeof_priv) {
8116 /* ensure 32-byte alignment of private area */
1ce8e7b5 8117 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
8118 alloc_size += sizeof_priv;
8119 }
8120 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 8121 alloc_size += NETDEV_ALIGN - 1;
1da177e4 8122
dcda9b04 8123 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
62b5942a 8124 if (!p)
1da177e4 8125 return NULL;
1da177e4 8126
1ce8e7b5 8127 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 8128 dev->padded = (char *)dev - (char *)p;
ab9c73cc 8129
29b4433d
ED
8130 dev->pcpu_refcnt = alloc_percpu(int);
8131 if (!dev->pcpu_refcnt)
74d332c1 8132 goto free_dev;
ab9c73cc 8133
ab9c73cc 8134 if (dev_addr_init(dev))
29b4433d 8135 goto free_pcpu;
ab9c73cc 8136
22bedad3 8137 dev_mc_init(dev);
a748ee24 8138 dev_uc_init(dev);
ccffad25 8139
c346dca1 8140 dev_net_set(dev, &init_net);
1da177e4 8141
8d3bdbd5 8142 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 8143 dev->gso_max_segs = GSO_MAX_SEGS;
8d3bdbd5 8144
8d3bdbd5
DM
8145 INIT_LIST_HEAD(&dev->napi_list);
8146 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 8147 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 8148 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
8149 INIT_LIST_HEAD(&dev->adj_list.upper);
8150 INIT_LIST_HEAD(&dev->adj_list.lower);
7866a621
SN
8151 INIT_LIST_HEAD(&dev->ptype_all);
8152 INIT_LIST_HEAD(&dev->ptype_specific);
59cc1f61
JK
8153#ifdef CONFIG_NET_SCHED
8154 hash_init(dev->qdisc_hash);
8155#endif
02875878 8156 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8d3bdbd5
DM
8157 setup(dev);
8158
a813104d 8159 if (!dev->tx_queue_len) {
f84bb1ea 8160 dev->priv_flags |= IFF_NO_QUEUE;
11597084 8161 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
a813104d 8162 }
906470c1 8163
36909ea4
TH
8164 dev->num_tx_queues = txqs;
8165 dev->real_num_tx_queues = txqs;
ed9af2e8 8166 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 8167 goto free_all;
e8a0464c 8168
a953be53 8169#ifdef CONFIG_SYSFS
36909ea4
TH
8170 dev->num_rx_queues = rxqs;
8171 dev->real_num_rx_queues = rxqs;
fe822240 8172 if (netif_alloc_rx_queues(dev))
8d3bdbd5 8173 goto free_all;
df334545 8174#endif
0a9627f2 8175
1da177e4 8176 strcpy(dev->name, name);
c835a677 8177 dev->name_assign_type = name_assign_type;
cbda10fa 8178 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
8179 if (!dev->ethtool_ops)
8180 dev->ethtool_ops = &default_ethtool_ops;
e687ad60
PN
8181
8182 nf_hook_ingress_init(dev);
8183
1da177e4 8184 return dev;
ab9c73cc 8185
8d3bdbd5
DM
8186free_all:
8187 free_netdev(dev);
8188 return NULL;
8189
29b4433d
ED
8190free_pcpu:
8191 free_percpu(dev->pcpu_refcnt);
74d332c1
ED
8192free_dev:
8193 netdev_freemem(dev);
ab9c73cc 8194 return NULL;
1da177e4 8195}
36909ea4 8196EXPORT_SYMBOL(alloc_netdev_mqs);
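/* Example (editorial sketch): allocating a multiqueue device with a
 * 32-byte-aligned private area. Eight queues each way is an arbitrary
 * choice; struct example_priv and example_setup are the hypothetical
 * helpers from the sketches above.
 */
static struct net_device *example_alloc_mq(void)
{
	/* the private area is reachable afterwards via netdev_priv() */
	return alloc_netdev_mqs(sizeof(struct example_priv), "exmq%d",
				NET_NAME_ENUM, example_setup, 8, 8);
}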
1da177e4
LT
8197
8198/**
722c9a0c 8199 * free_netdev - free network device
8200 * @dev: device
1da177e4 8201 *
722c9a0c 8202 * This function does the last stage of destroying an allocated device
8203 * interface. The reference to the device object is released. If this
8204 * is the last reference then it will be freed. Must be called in process
8205 * context.
1da177e4
LT
8206 */
8207void free_netdev(struct net_device *dev)
8208{
d565b0a1 8209 struct napi_struct *p, *n;
b5cdae32 8210 struct bpf_prog *prog;
d565b0a1 8211
93d05d4a 8212 might_sleep();
60877a32 8213 netif_free_tx_queues(dev);
a953be53 8214#ifdef CONFIG_SYSFS
10595902 8215 kvfree(dev->_rx);
fe822240 8216#endif
e8a0464c 8217
33d480ce 8218 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 8219
f001fde5
JP
8220 /* Flush device addresses */
8221 dev_addr_flush(dev);
8222
d565b0a1
HX
8223 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8224 netif_napi_del(p);
8225
29b4433d
ED
8226 free_percpu(dev->pcpu_refcnt);
8227 dev->pcpu_refcnt = NULL;
8228
b5cdae32
DM
8229 prog = rcu_dereference_protected(dev->xdp_prog, 1);
8230 if (prog) {
8231 bpf_prog_put(prog);
8232 static_key_slow_dec(&generic_xdp_needed);
8233 }
8234
3041a069 8235 /* Compatibility with error handling in drivers */
1da177e4 8236 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 8237 netdev_freemem(dev);
1da177e4
LT
8238 return;
8239 }
8240
8241 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8242 dev->reg_state = NETREG_RELEASED;
8243
43cb76d9
GKH
8244 /* will free via device release */
8245 put_device(&dev->dev);
1da177e4 8246}
d1b19dff 8247EXPORT_SYMBOL(free_netdev);
4ec93edb 8248
f0db275a
SH
8249/**
8250 * synchronize_net - Synchronize with packet receive processing
8251 *
8252 * Wait for packets currently being received to be done.
8253 * Does not block later packets from starting.
8254 */
4ec93edb 8255void synchronize_net(void)
1da177e4
LT
8256{
8257 might_sleep();
be3fc413
ED
8258 if (rtnl_is_locked())
8259 synchronize_rcu_expedited();
8260 else
8261 synchronize_rcu();
1da177e4 8262}
d1b19dff 8263EXPORT_SYMBOL(synchronize_net);
1da177e4
LT
8264
8265/**
44a0873d 8266 * unregister_netdevice_queue - remove device from the kernel
1da177e4 8267 * @dev: device
44a0873d 8268 * @head: list
6ebfbc06 8269 *
1da177e4 8270 * This function shuts down a device interface and removes it
d59b54b1 8271 * from the kernel tables.
44a0873d 8272 * If head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
8273 *
8274 * Callers must hold the rtnl semaphore. You may want
8275 * unregister_netdev() instead of this.
8276 */
8277
44a0873d 8278void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 8279{
a6620712
HX
8280 ASSERT_RTNL();
8281
44a0873d 8282 if (head) {
9fdce099 8283 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
8284 } else {
8285 rollback_registered(dev);
8286 /* Finish processing unregister after unlock */
8287 net_set_todo(dev);
8288 }
1da177e4 8289}
44a0873d 8290EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 8291
9b5e383c
ED
8292/**
8293 * unregister_netdevice_many - unregister many devices
8294 * @head: list of devices
87757a91
ED
8295 *
8296 * Note: As most callers use a stack-allocated list_head,
8297 * we force a list_del() to make sure the stack won't be corrupted later.
9b5e383c
ED
8298 */
8299void unregister_netdevice_many(struct list_head *head)
8300{
8301 struct net_device *dev;
8302
8303 if (!list_empty(head)) {
8304 rollback_registered_many(head);
8305 list_for_each_entry(dev, head, unreg_list)
8306 net_set_todo(dev);
87757a91 8307 list_del(head);
9b5e383c
ED
8308 }
8309}
63c8099d 8310EXPORT_SYMBOL(unregister_netdevice_many);
9b5e383c 8311
1da177e4
LT
8312/**
8313 * unregister_netdev - remove device from the kernel
8314 * @dev: device
8315 *
8316 * This function shuts down a device interface and removes it
d59b54b1 8317 * from the kernel tables.
1da177e4
LT
8318 *
8319 * This is just a wrapper for unregister_netdevice that takes
8320 * the rtnl semaphore. In general you want to use this and not
8321 * unregister_netdevice.
8322 */
8323void unregister_netdev(struct net_device *dev)
8324{
8325 rtnl_lock();
8326 unregister_netdevice(dev);
8327 rtnl_unlock();
8328}
1da177e4
LT
8329EXPORT_SYMBOL(unregister_netdev);
8330
ce286d32
EB
8331/**
8332 * dev_change_net_namespace - move device to a different network namespace
8333 * @dev: device
8334 * @net: network namespace
8335 * @pat: If not NULL, name pattern to try if the current device name
8336 * is already taken in the destination network namespace.
8337 *
8338 * This function shuts down a device interface and moves it
8339 * to a new network namespace. On success 0 is returned, on
8340 * a failure a negative errno code is returned.
8341 *
8342 * Callers must hold the rtnl semaphore.
8343 */
8344
8345int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8346{
6621dd29 8347 int err, new_nsid;
ce286d32
EB
8348
8349 ASSERT_RTNL();
8350
8351 /* Don't allow namespace local devices to be moved. */
8352 err = -EINVAL;
8353 if (dev->features & NETIF_F_NETNS_LOCAL)
8354 goto out;
8355
8356 /* Ensure the device has been registered */
ce286d32
EB
8357 if (dev->reg_state != NETREG_REGISTERED)
8358 goto out;
8359
8360 /* Get out if there is nothing to do */
8361 err = 0;
878628fb 8362 if (net_eq(dev_net(dev), net))
ce286d32
EB
8363 goto out;
8364
8365 /* Pick the destination device name, and ensure
8366 * we can use it in the destination network namespace.
8367 */
8368 err = -EEXIST;
d9031024 8369 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
8370 /* We get here if we can't use the current device name */
8371 if (!pat)
8372 goto out;
828de4f6 8373 if (dev_get_valid_name(net, dev, pat) < 0)
ce286d32
EB
8374 goto out;
8375 }
8376
8377 /*
8378 * And now a mini version of register_netdevice and unregister_netdevice.
8379 */
8380
8381 /* If device is running close it first. */
9b772652 8382 dev_close(dev);
ce286d32
EB
8383
8384 /* And unlink it from device chain */
8385 err = -ENODEV;
8386 unlist_netdevice(dev);
8387
8388 synchronize_net();
8389
8390 /* Shutdown queueing discipline. */
8391 dev_shutdown(dev);
8392
8393 /* Notify protocols that we are about to destroy
eb13da1a 8394 * this device. They should clean all the things.
8395 *
8396 * Note that dev->reg_state stays at NETREG_REGISTERED.
8397 * This is wanted because this way 8021q and macvlan know
8398 * the device is just moving and can keep their slaves up.
8399 */
ce286d32 8400 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43
G
8401 rcu_barrier();
8402 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6621dd29
ND
8403 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net)
8404 new_nsid = peernet2id_alloc(dev_net(dev), net);
8405 else
8406 new_nsid = peernet2id(dev_net(dev), net);
8407 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid);
ce286d32
EB
8408
8409 /*
8410 * Flush the unicast and multicast chains
8411 */
a748ee24 8412 dev_uc_flush(dev);
22bedad3 8413 dev_mc_flush(dev);
ce286d32 8414
4e66ae2e
SH
8415 /* Send a netdev-removed uevent to the old namespace */
8416 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
4c75431a 8417 netdev_adjacent_del_links(dev);
4e66ae2e 8418
ce286d32 8419 /* Actually switch the network namespace */
c346dca1 8420 dev_net_set(dev, net);
ce286d32 8421
ce286d32 8422 /* If there is an ifindex conflict assign a new one */
7a66bbc9 8423 if (__dev_get_by_index(net, dev->ifindex))
ce286d32 8424 dev->ifindex = dev_new_index(net);
ce286d32 8425
4e66ae2e
SH
8426 /* Send a netdev-add uevent to the new namespace */
8427 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
4c75431a 8428 netdev_adjacent_add_links(dev);
4e66ae2e 8429
8b41d188 8430 /* Fixup kobjects */
a1b3f594 8431 err = device_rename(&dev->dev, dev->name);
8b41d188 8432 WARN_ON(err);
ce286d32
EB
8433
8434 /* Add the device back in the hashes */
8435 list_netdevice(dev);
8436
8437 /* Notify protocols that a new device appeared. */
8438 call_netdevice_notifiers(NETDEV_REGISTER, dev);
8439
d90a909e
EB
8440 /*
8441 * Prevent userspace races by waiting until the network
8442 * device is fully set up before sending notifications.
8443 */
7f294054 8444 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 8445
ce286d32
EB
8446 synchronize_net();
8447 err = 0;
8448out:
8449 return err;
8450}
463d0183 8451EXPORT_SYMBOL_GPL(dev_change_net_namespace);
ce286d32 8452
f0bf90de 8453static int dev_cpu_dead(unsigned int oldcpu)
1da177e4
LT
8454{
8455 struct sk_buff **list_skb;
1da177e4 8456 struct sk_buff *skb;
f0bf90de 8457 unsigned int cpu;
97d8b6e3 8458 struct softnet_data *sd, *oldsd, *remsd = NULL;
1da177e4 8459
1da177e4
LT
8460 local_irq_disable();
8461 cpu = smp_processor_id();
8462 sd = &per_cpu(softnet_data, cpu);
8463 oldsd = &per_cpu(softnet_data, oldcpu);
8464
8465 /* Find end of our completion_queue. */
8466 list_skb = &sd->completion_queue;
8467 while (*list_skb)
8468 list_skb = &(*list_skb)->next;
8469 /* Append completion queue from offline CPU. */
8470 *list_skb = oldsd->completion_queue;
8471 oldsd->completion_queue = NULL;
8472
1da177e4 8473 /* Append output queue from offline CPU. */
a9cbd588
CG
8474 if (oldsd->output_queue) {
8475 *sd->output_queue_tailp = oldsd->output_queue;
8476 sd->output_queue_tailp = oldsd->output_queue_tailp;
8477 oldsd->output_queue = NULL;
8478 oldsd->output_queue_tailp = &oldsd->output_queue;
8479 }
ac64da0b
ED
8480 /* Append NAPI poll list from offline CPU, with one exception :
8481 * process_backlog() must be called by cpu owning percpu backlog.
8482 * We properly handle process_queue & input_pkt_queue later.
8483 */
8484 while (!list_empty(&oldsd->poll_list)) {
8485 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
8486 struct napi_struct,
8487 poll_list);
8488
8489 list_del_init(&napi->poll_list);
8490 if (napi->poll == process_backlog)
8491 napi->state = 0;
8492 else
8493 ____napi_schedule(sd, napi);
264524d5 8494 }
1da177e4
LT
8495
8496 raise_softirq_irqoff(NET_TX_SOFTIRQ);
8497 local_irq_enable();
8498
773fc8f6 8499#ifdef CONFIG_RPS
8500 remsd = oldsd->rps_ipi_list;
8501 oldsd->rps_ipi_list = NULL;
8502#endif
8503 /* send out pending IPI's on offline CPU */
8504 net_rps_send_ipi(remsd);
8505
1da177e4 8506 /* Process offline CPU's input_pkt_queue */
76cc8b13 8507 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
91e83133 8508 netif_rx_ni(skb);
76cc8b13 8509 input_queue_head_incr(oldsd);
fec5e652 8510 }
ac64da0b 8511 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
91e83133 8512 netif_rx_ni(skb);
76cc8b13
TH
8513 input_queue_head_incr(oldsd);
8514 }
1da177e4 8515
f0bf90de 8516 return 0;
1da177e4 8517}
1da177e4 8518
7f353bf2 8519/**
b63365a2
HX
8520 * netdev_increment_features - increment feature set by one
8521 * @all: current feature set
8522 * @one: new feature set
8523 * @mask: mask feature set
7f353bf2
HX
8524 *
8525 * Computes a new feature set after adding a device with feature set
b63365a2
HX
8526 * @one to the master device with current feature set @all. Will not
8527 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 8528 */
c8f44aff
MM
8529netdev_features_t netdev_increment_features(netdev_features_t all,
8530 netdev_features_t one, netdev_features_t mask)
b63365a2 8531{
c8cd0989 8532 if (mask & NETIF_F_HW_CSUM)
a188222b 8533 mask |= NETIF_F_CSUM_MASK;
1742f183 8534 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 8535
a188222b 8536 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
1742f183 8537 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 8538
1742f183 8539 /* If one device supports hw checksumming, set for all. */
c8cd0989
TH
8540 if (all & NETIF_F_HW_CSUM)
8541 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
7f353bf2
HX
8542
8543 return all;
8544}
b63365a2 8545EXPORT_SYMBOL(netdev_increment_features);
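/* Example (editorial sketch): folding each lower device's features into
 * a master, bonding-style, using the real lower-dev iterator from this
 * file. The surrounding driver context is hypothetical; run under RTNL
 * and let netdev_change_features() propagate the result.
 */
static void example_master_recompute(struct net_device *master)
{
	netdev_features_t all = master->hw_features;	/* start from the mask */
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(master, lower, iter)
		all = netdev_increment_features(all, lower->features,
						master->hw_features);
	master->features = all;
	netdev_change_features(master);		/* notify unconditionally */
}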
7f353bf2 8546
430f03cd 8547static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
8548{
8549 int i;
8550 struct hlist_head *hash;
8551
8552 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
8553 if (hash != NULL)
8554 for (i = 0; i < NETDEV_HASHENTRIES; i++)
8555 INIT_HLIST_HEAD(&hash[i]);
8556
8557 return hash;
8558}
8559
881d966b 8560/* Initialize per network namespace state */
4665079c 8561static int __net_init netdev_init(struct net *net)
881d966b 8562{
734b6541
RM
8563 if (net != &init_net)
8564 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 8565
30d97d35
PE
8566 net->dev_name_head = netdev_create_hash();
8567 if (net->dev_name_head == NULL)
8568 goto err_name;
881d966b 8569
30d97d35
PE
8570 net->dev_index_head = netdev_create_hash();
8571 if (net->dev_index_head == NULL)
8572 goto err_idx;
881d966b
EB
8573
8574 return 0;
30d97d35
PE
8575
8576err_idx:
8577 kfree(net->dev_name_head);
8578err_name:
8579 return -ENOMEM;
881d966b
EB
8580}
8581
f0db275a
SH
8582/**
8583 * netdev_drivername - network driver for the device
8584 * @dev: network device
f0db275a
SH
8585 *
8586 * Determine network driver for device.
8587 */
3019de12 8588const char *netdev_drivername(const struct net_device *dev)
6579e57b 8589{
cf04a4c7
SH
8590 const struct device_driver *driver;
8591 const struct device *parent;
3019de12 8592 const char *empty = "";
6579e57b
AV
8593
8594 parent = dev->dev.parent;
6579e57b 8595 if (!parent)
3019de12 8596 return empty;
6579e57b
AV
8597
8598 driver = parent->driver;
8599 if (driver && driver->name)
3019de12
DM
8600 return driver->name;
8601 return empty;
6579e57b
AV
8602}
8603
6ea754eb
JP
8604static void __netdev_printk(const char *level, const struct net_device *dev,
8605 struct va_format *vaf)
256df2f3 8606{
b004ff49 8607 if (dev && dev->dev.parent) {
6ea754eb
JP
8608 dev_printk_emit(level[1] - '0',
8609 dev->dev.parent,
8610 "%s %s %s%s: %pV",
8611 dev_driver_string(dev->dev.parent),
8612 dev_name(dev->dev.parent),
8613 netdev_name(dev), netdev_reg_state(dev),
8614 vaf);
b004ff49 8615 } else if (dev) {
6ea754eb
JP
8616 printk("%s%s%s: %pV",
8617 level, netdev_name(dev), netdev_reg_state(dev), vaf);
b004ff49 8618 } else {
6ea754eb 8619 printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 8620 }
256df2f3
JP
8621}
8622
6ea754eb
JP
8623void netdev_printk(const char *level, const struct net_device *dev,
8624 const char *format, ...)
256df2f3
JP
8625{
8626 struct va_format vaf;
8627 va_list args;
256df2f3
JP
8628
8629 va_start(args, format);
8630
8631 vaf.fmt = format;
8632 vaf.va = &args;
8633
6ea754eb 8634 __netdev_printk(level, dev, &vaf);
b004ff49 8635
256df2f3 8636 va_end(args);
256df2f3
JP
8637}
8638EXPORT_SYMBOL(netdev_printk);
8639
8640#define define_netdev_printk_level(func, level) \
6ea754eb 8641void func(const struct net_device *dev, const char *fmt, ...) \
256df2f3 8642{ \
256df2f3
JP
8643 struct va_format vaf; \
8644 va_list args; \
8645 \
8646 va_start(args, fmt); \
8647 \
8648 vaf.fmt = fmt; \
8649 vaf.va = &args; \
8650 \
6ea754eb 8651 __netdev_printk(level, dev, &vaf); \
b004ff49 8652 \
256df2f3 8653 va_end(args); \
256df2f3
JP
8654} \
8655EXPORT_SYMBOL(func);
8656
8657define_netdev_printk_level(netdev_emerg, KERN_EMERG);
8658define_netdev_printk_level(netdev_alert, KERN_ALERT);
8659define_netdev_printk_level(netdev_crit, KERN_CRIT);
8660define_netdev_printk_level(netdev_err, KERN_ERR);
8661define_netdev_printk_level(netdev_warn, KERN_WARNING);
8662define_netdev_printk_level(netdev_notice, KERN_NOTICE);
8663define_netdev_printk_level(netdev_info, KERN_INFO);
8664
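/* Example (editorial sketch): the per-level helpers generated above in
 * use; the queue index is a made-up value. When a parent device exists,
 * output is prefixed with the driver, bus and interface names.
 */
static void example_log_state(struct net_device *dev)
{
	netdev_info(dev, "link is up\n");
	netdev_err(dev, "TX timeout on queue %u\n", 0u);
}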
4665079c 8665static void __net_exit netdev_exit(struct net *net)
881d966b
EB
8666{
8667 kfree(net->dev_name_head);
8668 kfree(net->dev_index_head);
ee21b18b
VA
8669 if (net != &init_net)
8670 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
881d966b
EB
8671}
8672
022cbae6 8673static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
8674 .init = netdev_init,
8675 .exit = netdev_exit,
8676};
8677
4665079c 8678static void __net_exit default_device_exit(struct net *net)
ce286d32 8679{
e008b5fc 8680 struct net_device *dev, *aux;
ce286d32 8681 /*
e008b5fc 8682 * Push all migratable network devices back to the
ce286d32
EB
8683 * initial network namespace
8684 */
8685 rtnl_lock();
e008b5fc 8686 for_each_netdev_safe(net, dev, aux) {
ce286d32 8687 int err;
aca51397 8688 char fb_name[IFNAMSIZ];
ce286d32
EB
8689
8690 /* Ignore unmoveable devices (i.e. loopback) */
8691 if (dev->features & NETIF_F_NETNS_LOCAL)
8692 continue;
8693
e008b5fc
EB
8694 /* Leave virtual devices for the generic cleanup */
8695 if (dev->rtnl_link_ops)
8696 continue;
d0c082ce 8697
25985edc 8698 /* Push remaining network devices to init_net */
aca51397
PE
8699 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
8700 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 8701 if (err) {
7b6cd1ce
JP
8702 pr_emerg("%s: failed to move %s to init_net: %d\n",
8703 __func__, dev->name, err);
aca51397 8704 BUG();
ce286d32
EB
8705 }
8706 }
8707 rtnl_unlock();
8708}
8709
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

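/*
 * Illustrative sketch (not part of dev.c): the wait_woken() pattern
 * used above, reduced to its skeleton. example_wq and example_cond()
 * are hypothetical; woken_wake_function sets WQ_FLAG_WOKEN so a wakeup
 * between the condition check and the sleep is not lost.
 */
static bool example_cond(void)
{
	return true;	/* hypothetical wake-up condition */
}

static void example_wait(wait_queue_head_t *example_wq)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(example_wq, &wait);
	while (!example_cond())
		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	remove_wait_queue(example_wq, &wait);
}
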
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list contains all the network device
	 * unregistrations that happen in default_device_exit_batch;
	 * they will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

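/*
 * Illustrative sketch (not part of dev.c): the queue-then-flush idiom
 * used above. Collecting devices on a local list and tearing them all
 * down with one unregister_netdevice_many() call lets them share the
 * expensive synchronize-and-flush work instead of paying it per device.
 * example_kill_pair() is hypothetical.
 */
static void example_kill_pair(struct net_device *a, struct net_device *b)
{
	LIST_HEAD(kill_list);

	ASSERT_RTNL();
	unregister_netdevice_queue(a, &kill_list);
	unregister_netdevice_queue(b, &kill_list);
	unregister_netdevice_many(&kill_list);
}
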
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list
 * and unhooks any devices that fail to initialize (normally hardware
 * not present), leaving us with a valid list of present and active
 * devices.
 */

/*
 * This is called single-threaded during boot, so there is no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialize the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices, so that it is the first device to appear
	 * and the last network device to disappear.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}
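
/*
 * Illustrative sketch (not part of dev.c): reaching the per-CPU
 * softnet state initialised in net_dev_init() at runtime.
 * example_peek_softnet() is hypothetical; the unlocked queue-length
 * read is a racy snapshot for debug output only.
 */
static void example_peek_softnet(void)
{
	struct softnet_data *sd;

	sd = &get_cpu_var(softnet_data);	/* pins us to this CPU */
	pr_debug("backlog weight=%d queued=%u\n",
		 sd->backlog.weight, skb_queue_len(&sd->input_pkt_queue));
	put_cpu_var(softnet_data);
}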

subsys_initcall(net_dev_init);
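
/*
 * Note: subsys_initcall() places net_dev_init() at initcall level 4,
 * ahead of device_initcall()-level network drivers, so the softnet
 * queues and softirq handlers set up above exist before any driver
 * can feed packets into netif_rx().
 */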